Code Example #1
File: libde265-dec.c Project: 0p1pp1/gst-plugins-bad
static GstFlowReturn
_gst_libde265_image_available (GstVideoDecoder * decoder, int width, int height)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (decoder);

  if (G_UNLIKELY (dec->output_state == NULL
          || width != dec->output_state->info.width
          || height != dec->output_state->info.height)) {
    GstVideoCodecState *state =
        gst_video_decoder_set_output_state (decoder, GST_VIDEO_FORMAT_I420,
        width, height, dec->input_state);
    if (state == NULL) {
      GST_ERROR_OBJECT (dec, "Failed to set output state");
      return GST_FLOW_ERROR;
    }
    if (!gst_video_decoder_negotiate (decoder)) {
      GST_ERROR_OBJECT (dec, "Failed to negotiate format");
      gst_video_codec_state_unref (state);
      return GST_FLOW_ERROR;
    }
    if (dec->output_state != NULL) {
      gst_video_codec_state_unref (dec->output_state);
    }
    dec->output_state = state;
    GST_DEBUG_OBJECT (dec, "Frame dimensions are %d x %d", width, height);
  }

  return GST_FLOW_OK;
}
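
Every example in this listing follows the same contract: gst_video_decoder_set_output_state() returns a new reference to the configured output state, the caller may tweak state->info before negotiating, and the reference must eventually be dropped with gst_video_codec_state_unref(). A minimal sketch of that pattern, not taken from any of the listed projects (my_dec_configure_output is a hypothetical name):

static gboolean
my_dec_configure_output (GstVideoDecoder * decoder,
    GstVideoCodecState * input_state, gint width, gint height)
{
  GstVideoCodecState *state;

  /* Returns a new reference; the input state serves as a template for
   * metadata such as framerate and pixel-aspect-ratio. */
  state = gst_video_decoder_set_output_state (decoder,
      GST_VIDEO_FORMAT_I420, width, height, input_state);
  if (state == NULL)
    return FALSE;

  /* Fields of state->info (fps, PAR, colorimetry, ...) may be adjusted
   * here before negotiating, as several of the examples do. */

  if (!gst_video_decoder_negotiate (decoder)) {
    gst_video_codec_state_unref (state);
    return FALSE;
  }

  /* Drop our reference; the base class keeps its own. */
  gst_video_codec_state_unref (state);
  return TRUE;
}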
Code Example #2
File: gstmfcdec.c Project: PeterXu/gst-mobile
static gboolean
gst_mfc_dec_negotiate (GstVideoDecoder * decoder)
{
  GstMFCDec *self = GST_MFC_DEC (decoder);
  GstVideoCodecState *state;
  GstCaps *allowed_caps;
  GstVideoFormat format = GST_VIDEO_FORMAT_NV12;

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_DECODER_SRC_PAD (self));
  allowed_caps = gst_caps_truncate (allowed_caps);
  allowed_caps = gst_caps_fixate (allowed_caps);
  if (!gst_caps_is_empty (allowed_caps)) {
    const gchar *format_str;
    GstStructure *s = gst_caps_get_structure (allowed_caps, 0);

    format_str = gst_structure_get_string (s, "format");
    if (format_str)
      format = gst_video_format_from_string (format_str);
  }
  gst_caps_unref (allowed_caps);

  self->format = format;
  state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self),
      self->format, self->crop_width, self->crop_height, self->input_state);

  gst_video_codec_state_unref (state);

  return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
}
Code Example #3
static GstFlowReturn _gst_libde265_image_available(VIDEO_DECODER_BASE * parse,
    const struct de265_image *image)
{
    GstLibde265Dec *dec = GST_LIBDE265_DEC (parse);
    int width = de265_get_image_width(image, 0);
    int height = de265_get_image_height(image, 0);

    if (G_UNLIKELY(width != dec->width || height != dec->height)) {
#if GST_CHECK_VERSION(1,0,0)
        GstVideoCodecState *state = gst_video_decoder_set_output_state (parse, GST_VIDEO_FORMAT_I420, width, height, dec->input_state);
        g_assert (state != NULL);
        if (dec->fps_n > 0) {
            state->info.fps_n = dec->fps_n;
            state->info.fps_d = dec->fps_d;
        }
        gst_video_decoder_negotiate(parse);
#else
        GstVideoState *state = gst_base_video_decoder_get_state (parse);
        g_assert (state != NULL);
        state->format = GST_VIDEO_FORMAT_I420;
        state->width = width;
        state->height = height;
        if (dec->fps_n > 0) {
            state->fps_n = dec->fps_n;
            state->fps_d = dec->fps_d;
        }
        gst_base_video_decoder_set_src_caps (parse);
#endif
        GST_DEBUG ("Frame dimensions are %d x %d\n", width, height);
        dec->width = width;
        dec->height = height;
    }
    
    return HAVE_FRAME (parse);
}
Code Example #4
File: gstjpegdec.c Project: Lachann/gst-plugins-good
static void
gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc)
{
  GstVideoCodecState *outstate;
  GstVideoInfo *info;
  GstVideoFormat format;

  switch (clrspc) {
    case JCS_RGB:
      format = GST_VIDEO_FORMAT_RGB;
      break;
    case JCS_GRAYSCALE:
      format = GST_VIDEO_FORMAT_GRAY8;
      break;
    default:
      format = GST_VIDEO_FORMAT_I420;
      break;
  }

  /* Compare to currently configured output state */
  outstate = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  if (outstate) {
    info = &outstate->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        format == GST_VIDEO_INFO_FORMAT (info)) {
      gst_video_codec_state_unref (outstate);
      return;
    }
    gst_video_codec_state_unref (outstate);
  }

  outstate =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
      width, height, dec->input_state);

  switch (clrspc) {
    case JCS_RGB:
    case JCS_GRAYSCALE:
      break;
    default:
      outstate->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255;
      outstate->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      outstate->info.colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
      outstate->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_codec_state_unref (outstate);

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor);
  GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor);
}
Code Example #5
static gboolean
gst_video_decoder_tester_set_format (GstVideoDecoder * dec,
    GstVideoCodecState * state)
{
  GstVideoCodecState *res = gst_video_decoder_set_output_state (dec,
      GST_VIDEO_FORMAT_GRAY8, TEST_VIDEO_WIDTH, TEST_VIDEO_HEIGHT, NULL);

  gst_video_codec_state_unref (res);
  return TRUE;
}
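
As a usage note, a set_format implementation like this test stub is wired up through the GstVideoDecoderClass vfunc table. A hypothetical class_init sketch (the class name merely follows the tester element above):

static void
gst_video_decoder_tester_class_init (GstVideoDecoderTesterClass * klass)
{
  GstVideoDecoderClass *vdec_class = GST_VIDEO_DECODER_CLASS (klass);

  /* The base class invokes set_format whenever upstream caps change. */
  vdec_class->set_format = gst_video_decoder_tester_set_format;
}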
Code Example #6
File: gstrsvgdec.c Project: cbetz421/gst-plugins-bad
static gboolean
gst_rsvg_dec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstRsvgDec *rsvg = GST_RSVG_DEC (decoder);
  GstVideoInfo *info = &state->info;

  if (rsvg->input_state)
    gst_video_codec_state_unref (rsvg->input_state);
  rsvg->input_state = gst_video_codec_state_ref (state);

  /* Create the output state. Note: gst_video_decoder_set_output_state()
   * returns a reference, which this snippet never unrefs. */
  gst_video_decoder_set_output_state (decoder, GST_RSVG_VIDEO_FORMAT,
      GST_VIDEO_INFO_WIDTH (info), GST_VIDEO_INFO_HEIGHT (info),
      rsvg->input_state);

  return TRUE;
}
Code Example #7
File: vtdec.c Project: shakin/gst-plugins-bad
static gboolean
gst_vtdec_negotiate_output_format (GstVtdec * vtdec,
    GstVideoCodecState * input_state)
{
  GstCaps *caps = NULL, *peercaps = NULL, *templcaps;
  GstVideoFormat output_format;
  GstVideoCodecState *output_state = NULL;
  GstCapsFeatures *features;
  GstStructure *structure;
  const gchar *s;

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);

  /* Check if output supports GL caps by preference */
  templcaps = gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec));
  caps =
      gst_caps_intersect_full (templcaps, peercaps, GST_CAPS_INTERSECT_FIRST);

  gst_caps_unref (peercaps);
  gst_caps_unref (templcaps);

  caps = gst_caps_truncate (caps);
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  output_format = gst_video_format_from_string (s);
  features = gst_caps_features_copy (gst_caps_get_features (caps, 0));

  gst_caps_unref (caps);

  if (!gst_vtdec_create_session (vtdec, output_format)) {
    gst_caps_features_free (features);
    return FALSE;
  }

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      output_format, vtdec->video_info.width, vtdec->video_info.height,
      input_state);

  output_state->caps = gst_video_info_to_caps (&output_state->info);
  gst_caps_set_features (output_state->caps, 0, features);

  return TRUE;
}
Code Example #8
File: gstjpegdec.c Project: an146/gst-plugins-good
static void
gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc)
{
  GstVideoCodecState *outstate;
  GstVideoInfo *info;
  GstVideoFormat format;

  switch (clrspc) {
    case JCS_RGB:
      format = GST_VIDEO_FORMAT_RGB;
      break;
    case JCS_GRAYSCALE:
      format = GST_VIDEO_FORMAT_GRAY8;
      break;
    default:
      format = GST_VIDEO_FORMAT_I420;
      break;
  }

  /* Compare to currently configured output state */
  outstate = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  if (outstate) {
    info = &outstate->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        format == GST_VIDEO_INFO_FORMAT (info)) {
      gst_video_codec_state_unref (outstate);
      return;
    }
    gst_video_codec_state_unref (outstate);
  }

  outstate =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
      width, height, dec->input_state);

  gst_video_codec_state_unref (outstate);

  GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor);
  GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor);
}
Code Example #9
File: gstamlvdec.c Project: vitmod/buildroot-aml
static gboolean gst_aml_vdec_set_format(GstVideoDecoder *dec, GstVideoCodecState *state)
{
	gboolean ret = FALSE;
	GstStructure *structure;
	const char *name;
	GstVideoInfo *info;
	gint par_num, par_den;
	GstVideoFormat fmt;
	GstAmlVdec *amlvdec = GST_AMLVDEC(dec);

	g_return_val_if_fail(state != NULL, FALSE);
	if (amlvdec->input_state)
		gst_video_codec_state_unref(amlvdec->input_state);
	amlvdec->input_state = gst_video_codec_state_ref(state);

	structure = gst_caps_get_structure(state->caps, 0);
	name = gst_structure_get_name(structure);
	GST_INFO_OBJECT(amlvdec, "%s = %s", __FUNCTION__, name);
	if (name) {
		ret = gst_set_vstream_info(amlvdec, state->caps);
		if (!amlvdec->output_state) {
			info = &amlvdec->input_state->info;
			fmt = GST_VIDEO_FORMAT_xRGB;
			GST_VIDEO_INFO_WIDTH (info) = amlvdec->pcodec->am_sysinfo.width;
			GST_VIDEO_INFO_HEIGHT (info) = amlvdec->pcodec->am_sysinfo.height;
			par_num = GST_VIDEO_INFO_PAR_N(info);
			par_den = GST_VIDEO_INFO_PAR_D(info);
			amlvdec->output_state = gst_video_decoder_set_output_state(GST_VIDEO_DECODER(amlvdec),
					fmt, info->width,
					info->height,
					amlvdec->input_state);
			gst_video_decoder_negotiate (GST_VIDEO_DECODER (amlvdec));
		}

	}
	return ret;
}
Code Example #10
static GstFlowReturn
daala_handle_type_packet (GstDaalaDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info;

  if (!dec->input_state)
    return GST_FLOW_NOT_NEGOTIATED;

  info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.timebase_numerator, dec->info.timebase_denominator,
      dec->info.pixel_aspect_numerator, dec->info.pixel_aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1.
   */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.pixel_aspect_numerator != 0
          && dec->info.pixel_aspect_denominator != 0)) {
    par_num = dec->info.pixel_aspect_numerator;
    par_den = dec->info.pixel_aspect_denominator;
  }
  /* daala has:
   *
   *  width/height : dimension of the encoded frame 
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts
   */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);

  if (dec->info.nplanes == 3 && dec->info.plane_info[0].xdec == 0 &&
      dec->info.plane_info[0].ydec == 0 &&
      dec->info.plane_info[1].xdec == 1 &&
      dec->info.plane_info[1].ydec == 1 &&
      dec->info.plane_info[2].xdec == 1 && dec->info.plane_info[2].ydec == 1) {
    fmt = GST_VIDEO_FORMAT_I420;
  } else if (dec->info.nplanes == 3 && dec->info.plane_info[0].xdec == 0 &&
      dec->info.plane_info[0].ydec == 0 &&
      dec->info.plane_info[1].xdec == 0 &&
      dec->info.plane_info[1].ydec == 0 &&
      dec->info.plane_info[2].xdec == 0 && dec->info.plane_info[2].ydec == 0) {
    fmt = GST_VIDEO_FORMAT_Y444;
  } else {
    goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* done */
  dec->decoder = daala_decode_alloc (&dec->info, dec->setup);

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference input stream ? */
  state->info.fps_n = dec->info.timebase_numerator;
  state->info.fps_d = dec->info.timebase_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format");
    return GST_FLOW_ERROR;
  }
}
Code Example #11
static GstFlowReturn
gst_vdp_h264_dec_idr (GstVdpH264Dec * h264_dec, GstVideoCodecFrame * frame,
    GstH264SliceHdr * slice)
{
  GstH264SPS *seq;

  GST_DEBUG_OBJECT (h264_dec, "Handling IDR slice");

  h264_dec->poc_msb = 0;
  h264_dec->prev_poc_lsb = 0;

  if (slice->dec_ref_pic_marking.no_output_of_prior_pics_flag)
    gst_h264_dpb_flush (h264_dec->dpb, FALSE);
  else
    gst_h264_dpb_flush (h264_dec->dpb, TRUE);

  if (slice->dec_ref_pic_marking.long_term_reference_flag)
    g_object_set (h264_dec->dpb, "max-longterm-frame-idx", 0, NULL);
  else
    g_object_set (h264_dec->dpb, "max-longterm-frame-idx", -1, NULL);

  seq = slice->pps->sequence;

  if (seq->id != h264_dec->current_sps) {
    GstVideoCodecState *state;
    VdpDecoderProfile profile;
    GstFlowReturn ret;

    GST_DEBUG_OBJECT (h264_dec, "Sequence changed !");

    state =
        gst_video_decoder_set_output_state (GST_VIDEO_DECODER (h264_dec),
        GST_VIDEO_FORMAT_YV12, seq->width, seq->height, h264_dec->input_state);

    /* calculate framerate if we haven't got one */
    if (state->info.fps_n == 0) {
      state->info.fps_n = seq->fps_num;
      state->info.fps_d = seq->fps_den;
    }
    if (state->info.par_n == 0 && seq->vui_parameters_present_flag) {
      state->info.par_n = seq->vui_parameters.par_n;
      state->info.par_d = seq->vui_parameters.par_d;
    }

    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (h264_dec)))
      goto nego_fail;

    switch (seq->profile_idc) {
      case 66:
        profile = VDP_DECODER_PROFILE_H264_BASELINE;
        break;

      case 77:
        profile = VDP_DECODER_PROFILE_H264_MAIN;
        break;

      case 100:
        profile = VDP_DECODER_PROFILE_H264_HIGH;
        break;

      default:
        goto profile_not_supported;
    }

    ret = gst_vdp_decoder_init_decoder (GST_VDP_DECODER (h264_dec), profile,
        seq->num_ref_frames, h264_dec->input_state);
    if (ret != GST_FLOW_OK)
      return ret;

    g_object_set (h264_dec->dpb, "num-ref-frames", seq->num_ref_frames, NULL);

    h264_dec->current_sps = seq->id;
  }

  return GST_FLOW_OK;

profile_not_supported:
  {
    GST_ELEMENT_ERROR (h264_dec, STREAM, WRONG_TYPE,
        ("vdpauh264dec doesn't support this streams profile"),
        ("profile_idc: %d", seq->profile_idc));
    return GST_FLOW_ERROR;
  }

nego_fail:
  {
    GST_ERROR_OBJECT (h264_dec, "Negotiation failed");
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
Code Example #12
File: vtdec.c Project: luisbg/gst-plugins-bad
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  gboolean output_textures;

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently configured
     * caps are still accepted by downstream we keep them so we don't have to
     * destroy and recreate the session.
     */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available
     */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
Code Example #13
static GstFlowReturn
gst_rsvg_decode_image (GstRsvgDec * rsvg, GstBuffer * buffer,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (rsvg);
  GstFlowReturn ret = GST_FLOW_OK;
  cairo_t *cr;
  cairo_surface_t *surface;
  RsvgHandle *handle;
  GError *error = NULL;
  RsvgDimensionData dimension;
  gdouble scalex, scaley;
  GstMapInfo minfo;
  GstVideoFrame vframe;
  GstVideoCodecState *output_state;

  GST_LOG_OBJECT (rsvg, "parsing svg");

  if (!gst_buffer_map (buffer, &minfo, GST_MAP_READ)) {
    GST_ERROR_OBJECT (rsvg, "Failed to get SVG image");
    return GST_FLOW_ERROR;
  }
  handle = rsvg_handle_new_from_data (minfo.data, minfo.size, &error);
  if (!handle) {
    GST_ERROR_OBJECT (rsvg, "Failed to parse SVG image: %s", error->message);
    g_error_free (error);
    return GST_FLOW_ERROR;
  }

  rsvg_handle_get_dimensions (handle, &dimension);

  output_state = gst_video_decoder_get_output_state (decoder);
  if ((output_state == NULL)
      || GST_VIDEO_INFO_WIDTH (&output_state->info) != dimension.width
      || GST_VIDEO_INFO_HEIGHT (&output_state->info) != dimension.height) {

    /* Create the output state */
    if (output_state)
      gst_video_codec_state_unref (output_state);
    output_state =
        gst_video_decoder_set_output_state (decoder, GST_RSVG_VIDEO_FORMAT,
        dimension.width, dimension.height, rsvg->input_state);
  }

  ret = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (ret != GST_FLOW_OK) {
    g_object_unref (handle);
    gst_video_codec_state_unref (output_state);
    GST_ERROR_OBJECT (rsvg, "Buffer allocation failed %s",
        gst_flow_get_name (ret));
    return ret;
  }

  GST_LOG_OBJECT (rsvg, "render image at %d x %d",
      GST_VIDEO_INFO_HEIGHT (&output_state->info),
      GST_VIDEO_INFO_WIDTH (&output_state->info));


  if (!gst_video_frame_map (&vframe,
          &output_state->info, frame->output_buffer, GST_MAP_READWRITE)) {
    GST_ERROR_OBJECT (rsvg, "Failed to get SVG image");
    g_object_unref (handle);
    gst_video_codec_state_unref (output_state);
    return GST_FLOW_ERROR;
  }
  surface =
      cairo_image_surface_create_for_data (GST_VIDEO_FRAME_PLANE_DATA (&vframe,
          0), CAIRO_FORMAT_ARGB32, GST_VIDEO_FRAME_WIDTH (&vframe),
      GST_VIDEO_FRAME_HEIGHT (&vframe), GST_VIDEO_FRAME_PLANE_STRIDE (&vframe,
          0));

  cr = cairo_create (surface);
  cairo_set_operator (cr, CAIRO_OPERATOR_CLEAR);
  cairo_set_source_rgba (cr, 1.0, 1.0, 1.0, 0.0);
  cairo_paint (cr);
  cairo_set_operator (cr, CAIRO_OPERATOR_OVER);
  cairo_set_source_rgba (cr, 0.0, 0.0, 0.0, 1.0);

  scalex = scaley = 1.0;
  if (GST_VIDEO_INFO_WIDTH (&output_state->info) != dimension.width) {
    scalex =
        ((gdouble) GST_VIDEO_INFO_WIDTH (&output_state->info)) /
        ((gdouble) dimension.width);
  }
  if (GST_VIDEO_INFO_HEIGHT (&output_state->info) != dimension.height) {
    scaley =
        ((gdouble) GST_VIDEO_INFO_HEIGHT (&output_state->info)) /
        ((gdouble) dimension.height);
  }
  cairo_scale (cr, scalex, scaley);
  rsvg_handle_render_cairo (handle, cr);

  g_object_unref (handle);
  cairo_destroy (cr);
  cairo_surface_destroy (surface);

  /* Now unpremultiply Cairo's ARGB to match GStreamer's */
  gst_rsvg_decode_unpremultiply (GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0),
      GST_VIDEO_FRAME_WIDTH (&vframe), GST_VIDEO_FRAME_HEIGHT (&vframe));

  gst_video_codec_state_unref (output_state);
  gst_buffer_unmap (buffer, &minfo);
  gst_video_frame_unmap (&vframe);

  return ret;
}
Code Example #14
File: gstpnmdec.c Project: Haifen/gst-plugins-bad
static GstFlowReturn
gst_pnmdec_parse (GstVideoDecoder * decoder, GstVideoCodecFrame * frame,
    GstAdapter * adapter, gboolean at_eos)
{
  gsize size;
  GstPnmdec *s = GST_PNMDEC (decoder);
  GstFlowReturn r = GST_FLOW_OK;
  guint offset = 0;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  const guint8 *raw_data;
  GstVideoCodecState *output_state;

  GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);

  size = gst_adapter_available (adapter);
  if (size < 8) {
    goto need_more_data;
  }
  raw_data = gst_adapter_map (adapter, size);

  if (s->mngr.info.fields != GST_PNM_INFO_FIELDS_ALL) {
    GstPnmInfoMngrResult res;

    res = gst_pnm_info_mngr_scan (&s->mngr, raw_data, size);

    switch (res) {
      case GST_PNM_INFO_MNGR_RESULT_FAILED:
        r = GST_FLOW_ERROR;
        goto out;
      case GST_PNM_INFO_MNGR_RESULT_READING:
        r = GST_FLOW_OK;
        goto out;
      case GST_PNM_INFO_MNGR_RESULT_FINISHED:
        switch (s->mngr.info.type) {
          case GST_PNM_TYPE_BITMAP:
            if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
              r = GST_FLOW_ERROR;
              goto out;
            }
            s->size = s->mngr.info.width * s->mngr.info.height * 1;
            format = GST_VIDEO_FORMAT_GRAY8;
            break;
          case GST_PNM_TYPE_GRAYMAP:
            s->size = s->mngr.info.width * s->mngr.info.height * 1;
            format = GST_VIDEO_FORMAT_GRAY8;
            break;
          case GST_PNM_TYPE_PIXMAP:
            s->size = s->mngr.info.width * s->mngr.info.height * 3;
            format = GST_VIDEO_FORMAT_RGB;
            break;
        }
        output_state =
            gst_video_decoder_set_output_state (GST_VIDEO_DECODER (s), format,
            s->mngr.info.width, s->mngr.info.height, s->input_state);
        gst_video_codec_state_unref (output_state);
        if (gst_video_decoder_negotiate (GST_VIDEO_DECODER (s)) == FALSE) {
          r = GST_FLOW_NOT_NEGOTIATED;
          goto out;
        }

        if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
          s->mngr.data_offset++;
          /* The size of the ASCII input data cannot be known in advance,
             so we parse it, count the pixels actually consumed, and decide
             from that when we have a full frame */
          s->buf = gst_buffer_new_and_alloc (s->size);
        }
        offset = s->mngr.data_offset;
        gst_adapter_flush (adapter, offset);
        size = size - offset;
    }
  }

  if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
    /* Parse the ASCII data and populate s->current_size with the number of
       bytes actually parsed from the input data */
    r = gst_pnmdec_parse_ascii (s, raw_data + offset, size);
  } else {
    /* Bitmaps pack 8 pixels into each byte */
    if (s->mngr.info.type == GST_PNM_TYPE_BITMAP)
      s->current_size += (size * 8);
    else
      s->current_size += size;
  }

  gst_video_decoder_add_to_frame (decoder, size);
  if (s->size <= s->current_size) {
    goto have_full_frame;
  }

need_more_data:
  return GST_VIDEO_DECODER_FLOW_NEED_DATA;

have_full_frame:
  return gst_video_decoder_have_frame (decoder);

out:
  return r;
}
Code Example #15
static gboolean
gst_amc_video_dec_set_src_caps (GstAmcVideoDec * self, GstAmcFormat * format)
{
  GstVideoCodecState *output_state;
  const gchar *mime;
  gint color_format, width, height;
  gint stride, slice_height;
  gint crop_left, crop_right;
  gint crop_top, crop_bottom;
  GstVideoFormat gst_format;
  GstAmcVideoDecClass *klass = GST_AMC_VIDEO_DEC_GET_CLASS (self);
  GError *err = NULL;
  gboolean ret;

  if (!gst_amc_format_get_int (format, "color-format", &color_format, &err) ||
      !gst_amc_format_get_int (format, "width", &width, &err) ||
      !gst_amc_format_get_int (format, "height", &height, &err)) {
    GST_ERROR_OBJECT (self, "Failed to get output format metadata: %s",
        err->message);
    g_clear_error (&err);
    return FALSE;
  }

  if (!gst_amc_format_get_int (format, "stride", &stride, &err) ||
      !gst_amc_format_get_int (format, "slice-height", &slice_height, &err)) {
    GST_ERROR_OBJECT (self, "Failed to get stride and slice-height: %s",
        err->message);
    g_clear_error (&err);
    return FALSE;
  }

  if (!gst_amc_format_get_int (format, "crop-left", &crop_left, &err) ||
      !gst_amc_format_get_int (format, "crop-right", &crop_right, &err) ||
      !gst_amc_format_get_int (format, "crop-top", &crop_top, &err) ||
      !gst_amc_format_get_int (format, "crop-bottom", &crop_bottom, &err)) {
    GST_ERROR_OBJECT (self, "Failed to get crop rectangle: %s", err->message);
    g_clear_error (&err);
    return FALSE;
  }

  if (width == 0 || height == 0) {
    GST_ERROR_OBJECT (self, "Height or width not set");
    return FALSE;
  }

  /* Reduce width/height to the crop rectangle; the resulting size is
   * (crop_right - crop_left + 1) x (crop_bottom - crop_top + 1). */
  if (crop_bottom)
    height = height - (height - crop_bottom - 1);
  if (crop_top)
    height = height - crop_top;

  if (crop_right)
    width = width - (width - crop_right - 1);
  if (crop_left)
    width = width - crop_left;

  mime = caps_to_mime (self->input_state->caps);
  if (!mime) {
    GST_ERROR_OBJECT (self, "Failed to convert caps to mime");
    return FALSE;
  }

  gst_format =
      gst_amc_color_format_to_video_format (klass->codec_info, mime,
      color_format);

  if (gst_format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ERROR_OBJECT (self, "Unknown color format 0x%08x", color_format);
    return FALSE;
  }

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self),
      gst_format, width, height, self->input_state);

  /* FIXME: Special handling for multiview, untested */
  if (color_format == COLOR_QCOM_FormatYVU420SemiPlanar32mMultiView) {
    gst_video_multiview_video_info_change_mode (&output_state->info,
        GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM, GST_VIDEO_MULTIVIEW_FLAGS_NONE);
  }

  self->format = gst_format;
  if (!gst_amc_color_format_info_set (&self->color_format_info,
          klass->codec_info, mime, color_format, width, height, stride,
          slice_height, crop_left, crop_right, crop_top, crop_bottom)) {
    GST_ERROR_OBJECT (self, "Failed to set up GstAmcColorFormatInfo");
    return FALSE;
  }

  GST_DEBUG_OBJECT (self,
      "Color format info: {color_format=%d, width=%d, height=%d, "
      "stride=%d, slice-height=%d, crop-left=%d, crop-top=%d, "
      "crop-right=%d, crop-bottom=%d, frame-size=%d}",
      self->color_format_info.color_format, self->color_format_info.width,
      self->color_format_info.height, self->color_format_info.stride,
      self->color_format_info.slice_height, self->color_format_info.crop_left,
      self->color_format_info.crop_top, self->color_format_info.crop_right,
      self->color_format_info.crop_bottom, self->color_format_info.frame_size);

  ret = gst_video_decoder_negotiate (GST_VIDEO_DECODER (self));
  gst_video_codec_state_unref (output_state);
  self->input_state_changed = FALSE;

  return ret;
}
Code Example #16
static GstFlowReturn
gst_pngdec_caps_create_and_set (GstPngDec * pngdec)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint bpc = 0, color_type;
  png_uint_32 width, height;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;

  g_return_val_if_fail (GST_IS_PNGDEC (pngdec), GST_FLOW_ERROR);

  /* Get bits per channel */
  bpc = png_get_bit_depth (pngdec->png, pngdec->info);

  /* Get Color type */
  color_type = png_get_color_type (pngdec->png, pngdec->info);

  /* Add alpha channel if 16-bit depth, but not for GRAY images */
  if ((bpc > 8) && (color_type != PNG_COLOR_TYPE_GRAY)) {
    png_set_add_alpha (pngdec->png, 0xffff, PNG_FILLER_BEFORE);
    png_set_swap (pngdec->png);
  }
#if 0
  /* We used to have this HACK to reverse the outgoing bytes, but the problem
   * that originally required the hack seems to have been in videoconvert's
   * RGBA descriptions. It doesn't seem needed now that's fixed, but might
   * still be needed on big-endian systems, I'm not sure. J.S. 6/7/2007 */
  if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
    png_set_bgr (pngdec->png);
#endif

  /* Gray scale with alpha channel converted to RGB */
  if (color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
    GST_LOG_OBJECT (pngdec,
        "converting grayscale png with alpha channel to RGB");
    png_set_gray_to_rgb (pngdec->png);
  }

  /* Gray scale upscaled to 8 bits */
  if ((color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
      (color_type == PNG_COLOR_TYPE_GRAY)) {
    if (bpc < 8) {              /* Convert to 8 bits */
      GST_LOG_OBJECT (pngdec, "converting grayscale image to 8 bits");
#if PNG_LIBPNG_VER < 10400
      png_set_gray_1_2_4_to_8 (pngdec->png);
#else
      png_set_expand_gray_1_2_4_to_8 (pngdec->png);
#endif
    }
  }

  /* Palette converted to RGB */
  if (color_type == PNG_COLOR_TYPE_PALETTE) {
    GST_LOG_OBJECT (pngdec, "converting palette png to RGB");
    png_set_palette_to_rgb (pngdec->png);
  }

  png_set_interlace_handling (pngdec->png);

  /* Update the info structure */
  png_read_update_info (pngdec->png, pngdec->info);

  /* Get IHDR header again after transformation settings */
  png_get_IHDR (pngdec->png, pngdec->info, &width, &height,
      &bpc, &pngdec->color_type, NULL, NULL, NULL);

  GST_LOG_OBJECT (pngdec, "this is a %dx%d PNG image", (gint) width,
      (gint) height);

  switch (pngdec->color_type) {
    case PNG_COLOR_TYPE_RGB:
      GST_LOG_OBJECT (pngdec, "we have no alpha channel, depth is 24 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGB;
      break;
    case PNG_COLOR_TYPE_RGB_ALPHA:
      GST_LOG_OBJECT (pngdec,
          "we have an alpha channel, depth is 32 or 64 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGBA;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_ARGB64;
      break;
    case PNG_COLOR_TYPE_GRAY:
      GST_LOG_OBJECT (pngdec,
          "We have an gray image, depth is 8 or 16 (be) bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_GRAY8;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_GRAY16_BE;
      break;
    default:
      break;
  }

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ELEMENT_ERROR (pngdec, STREAM, NOT_IMPLEMENTED, (NULL),
        ("pngdec does not support this color type"));
    ret = GST_FLOW_NOT_SUPPORTED;
    goto beach;
  }

  /* Check if output state changed */
  if (pngdec->output_state) {
    GstVideoInfo *info = &pngdec->output_state->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        GST_VIDEO_INFO_FORMAT (info) == format) {
      goto beach;
    }
    gst_video_codec_state_unref (pngdec->output_state);
  }
#ifdef HAVE_LIBPNG_1_5
  if ((pngdec->color_type & PNG_COLOR_MASK_COLOR)
      && !(pngdec->color_type & PNG_COLOR_MASK_PALETTE)
      && png_get_valid (pngdec->png, pngdec->info, PNG_INFO_iCCP)) {
    png_charp icc_name;
    png_bytep icc_profile;
    int icc_compression_type;
    png_uint_32 icc_proflen = 0;
    png_uint_32 ret = png_get_iCCP (pngdec->png, pngdec->info, &icc_name,
        &icc_compression_type, &icc_profile, &icc_proflen);

    if ((ret & PNG_INFO_iCCP)) {
      gpointer gst_icc_prof = g_memdup (icc_profile, icc_proflen);
      GstBuffer *tagbuffer = NULL;
      GstSample *tagsample = NULL;
      GstTagList *taglist = NULL;
      GstStructure *info = NULL;
      GstCaps *caps;

      GST_DEBUG_OBJECT (pngdec, "extracted ICC profile '%s' length=%i",
          icc_name, (guint32) icc_proflen);

      tagbuffer = gst_buffer_new_wrapped (gst_icc_prof, icc_proflen);

      caps = gst_caps_new_empty_simple ("application/vnd.iccprofile");
      info = gst_structure_new_empty ("application/vnd.iccprofile");

      if (icc_name)
        gst_structure_set (info, "icc-name", G_TYPE_STRING, icc_name, NULL);

      tagsample = gst_sample_new (tagbuffer, caps, NULL, info);

      gst_buffer_unref (tagbuffer);
      gst_caps_unref (caps);

      taglist = gst_tag_list_new_empty ();
      gst_tag_list_add (taglist, GST_TAG_MERGE_APPEND, GST_TAG_ATTACHMENT,
          tagsample, NULL);
      gst_sample_unref (tagsample);

      gst_video_decoder_merge_tags (GST_VIDEO_DECODER (pngdec), taglist,
          GST_TAG_MERGE_APPEND);
      gst_tag_list_unref (taglist);
    }
  }
#endif

  pngdec->output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (pngdec), format,
      width, height, pngdec->input_state);
  gst_video_decoder_negotiate (GST_VIDEO_DECODER (pngdec));
  GST_DEBUG ("Final %d %d", GST_VIDEO_INFO_WIDTH (&pngdec->output_state->info),
      GST_VIDEO_INFO_HEIGHT (&pngdec->output_state->info));

beach:
  return ret;
}
Code Example #17
static GstFlowReturn gst_openh264dec_handle_frame(GstVideoDecoder *decoder, GstVideoCodecFrame *frame)
{
    GstOpenh264Dec *openh264dec = GST_OPENH264DEC(decoder);
    GstMapInfo map_info;
    GstVideoCodecState *state;
    SBufferInfo dst_buf_info;
    DECODING_STATE ret;
    guint8 *yuvdata[3];
    GstFlowReturn flow_status;
    GstVideoFrame video_frame;
    guint actual_width, actual_height;
    guint i;
    guint8 *p;
    guint row_stride, component_width, component_height, src_width, row;

    if (frame) {
        if (!gst_buffer_map(frame->input_buffer, &map_info, GST_MAP_READ)) {
            GST_ERROR_OBJECT(openh264dec, "Cannot map input buffer!");
            return GST_FLOW_ERROR;
        }

        GST_LOG_OBJECT(openh264dec, "handle frame, %d", map_info.size > 4 ? map_info.data[4] & 0x1f : -1);

        memset (&dst_buf_info, 0, sizeof (SBufferInfo));
        ret = openh264dec->priv->decoder->DecodeFrame2(map_info.data, map_info.size, yuvdata, &dst_buf_info);

        if (ret == dsNoParamSets) {
            GST_DEBUG_OBJECT(openh264dec, "Requesting a key unit");
            gst_pad_push_event(GST_VIDEO_DECODER_SINK_PAD(decoder),
                gst_video_event_new_upstream_force_key_unit(GST_CLOCK_TIME_NONE, FALSE, 0));
        }

        if (ret != dsErrorFree && ret != dsNoParamSets) {
            GST_DEBUG_OBJECT(openh264dec, "Requesting a key unit");
            gst_pad_push_event(GST_VIDEO_DECODER_SINK_PAD(decoder),
                               gst_video_event_new_upstream_force_key_unit(GST_CLOCK_TIME_NONE, FALSE, 0));
            GST_LOG_OBJECT(openh264dec, "error decoding nal, return code: %d", ret);
        }

        gst_buffer_unmap(frame->input_buffer, &map_info);
        gst_video_codec_frame_unref (frame);
        frame = NULL;
    } else {
        memset (&dst_buf_info, 0, sizeof (SBufferInfo));
        ret = openh264dec->priv->decoder->DecodeFrame2(NULL, 0, yuvdata, &dst_buf_info);
        if (ret != dsErrorFree)
            return GST_FLOW_EOS;
    }

    /* FIXME: openh264 has no way for us to get a connection
     * between the input and output frames, we just have to
     * guess based on the input. Fortunately openh264 can
     * only do baseline profile. */
    frame = gst_video_decoder_get_oldest_frame (decoder);
    if (!frame) {
      /* Can only happen in finish() */
      return GST_FLOW_EOS;
    }

    /* No output available yet */
    if (dst_buf_info.iBufferStatus != 1) {
        return (frame ? GST_FLOW_OK : GST_FLOW_EOS);
    }

    actual_width  = dst_buf_info.UsrData.sSystemBuffer.iWidth;
    actual_height = dst_buf_info.UsrData.sSystemBuffer.iHeight;

    if (!gst_pad_has_current_caps (GST_VIDEO_DECODER_SRC_PAD (openh264dec)) || actual_width != openh264dec->priv->width || actual_height != openh264dec->priv->height) {
        state = gst_video_decoder_set_output_state(decoder,
            GST_VIDEO_FORMAT_I420,
            actual_width,
            actual_height,
            openh264dec->priv->input_state);
        openh264dec->priv->width = actual_width;
        openh264dec->priv->height = actual_height;

        if (!gst_video_decoder_negotiate(decoder)) {
            GST_ERROR_OBJECT(openh264dec, "Failed to negotiate with downstream elements");
            return GST_FLOW_NOT_NEGOTIATED;
        }
    } else {
        state = gst_video_decoder_get_output_state(decoder);
    }

    flow_status = gst_video_decoder_allocate_output_frame(decoder, frame);
    if (flow_status != GST_FLOW_OK) {
        gst_video_codec_state_unref (state);
        return flow_status;
    }

    if (!gst_video_frame_map(&video_frame, &state->info, frame->output_buffer, GST_MAP_WRITE)) {
        GST_ERROR_OBJECT(openh264dec, "Cannot map output buffer!");
        gst_video_codec_state_unref (state);
        return GST_FLOW_ERROR;
    }

    for (i = 0; i < 3; i++) {
        p = GST_VIDEO_FRAME_COMP_DATA(&video_frame, i);
        row_stride = GST_VIDEO_FRAME_COMP_STRIDE(&video_frame, i);
        component_width = GST_VIDEO_FRAME_COMP_WIDTH(&video_frame, i);
        component_height = GST_VIDEO_FRAME_COMP_HEIGHT(&video_frame, i);
        src_width = i < 1 ? dst_buf_info.UsrData.sSystemBuffer.iStride[0] : dst_buf_info.UsrData.sSystemBuffer.iStride[1];
        for (row = 0; row < component_height; row++) {
            memcpy(p, yuvdata[i], component_width);
            p += row_stride;
            yuvdata[i] += src_width;
        }
    }
    gst_video_codec_state_unref (state);
    gst_video_frame_unmap(&video_frame);

    return gst_video_decoder_finish_frame(decoder, frame);
}
Code Example #18
File: gstmpeg2dec.c Project: PeterXu/gst-mobile
static GstFlowReturn
handle_sequence (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime latency;
  const mpeg2_sequence_t *sequence;
  GstVideoCodecState *state;
  GstVideoInfo *dinfo = &mpeg2dec->decoded_info;
  GstVideoInfo *vinfo;
  GstVideoFormat format;

  sequence = info->sequence;

  if (sequence->frame_period == 0)
    goto invalid_frame_period;

  /* mpeg2 video can only be from 16x16 to 4096x4096. Everything
   * else is a corrupted file */
  if (sequence->width > 4096 || sequence->width < 16 ||
      sequence->height > 4096 || sequence->height < 16)
    goto invalid_size;

  GST_DEBUG_OBJECT (mpeg2dec,
      "widthxheight: %dx%d , decoded_widthxheight: %dx%d",
      sequence->picture_width, sequence->picture_height, sequence->width,
      sequence->height);

  if (sequence->picture_width != sequence->width ||
      sequence->picture_height != sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "we need to crop");
    mpeg2dec->need_cropping = TRUE;
  } else {
    GST_DEBUG_OBJECT (mpeg2dec, "no cropping needed");
    mpeg2dec->need_cropping = FALSE;
  }

  /* get subsampling */
  if (sequence->chroma_width < sequence->width) {
    /* horizontally subsampled */
    if (sequence->chroma_height < sequence->height) {
      /* and vertically subsampled */
      format = GST_VIDEO_FORMAT_I420;
    } else {
      format = GST_VIDEO_FORMAT_Y42B;
    }
  } else {
    /* not subsampled */
    format = GST_VIDEO_FORMAT_Y444;
  }

  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (mpeg2dec),
      format, sequence->picture_width, sequence->picture_height,
      mpeg2dec->input_state);
  vinfo = &state->info;

  /* If we don't have a valid upstream PAR override it */
  if (GST_VIDEO_INFO_PAR_N (vinfo) == 1 &&
      GST_VIDEO_INFO_PAR_D (vinfo) == 1 &&
      sequence->pixel_width != 0 && sequence->pixel_height != 0) {
#if MPEG2_RELEASE >= MPEG2_VERSION(0,5,0)
    guint pixel_width, pixel_height;
    if (mpeg2_guess_aspect (sequence, &pixel_width, &pixel_height)) {
      vinfo->par_n = pixel_width;
      vinfo->par_d = pixel_height;
    }
#else
    vinfo->par_n = sequence->pixel_width;
    vinfo->par_d = sequence->pixel_height;
#endif
    GST_DEBUG_OBJECT (mpeg2dec, "Setting PAR %d x %d",
        vinfo->par_n, vinfo->par_d);
  }
  vinfo->fps_n = 27000000;
  vinfo->fps_d = sequence->frame_period;

  if (!(sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE))
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  vinfo->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
  vinfo->colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;

  if (sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION) {
    /* do color description */
    switch (sequence->colour_primaries) {
      case 1:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
        break;
      case 4:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
        break;
      case 5:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
        break;
      case 6:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
        break;
      case 7:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
        break;
    }
    /* matrix coefficients */
    switch (sequence->matrix_coefficients) {
      case 1:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
        break;
      case 4:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_FCC;
        break;
      case 5:
      case 6:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
        break;
      case 7:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
        break;
    }
    /* transfer characteristics */
    switch (sequence->transfer_characteristics) {
      case 1:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 4:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA22;
        break;
      case 5:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA28;
        break;
      case 6:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 7:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
        break;
      case 8:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 9-255 reserved */
      default:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
        break;
    }
  }

  GST_DEBUG_OBJECT (mpeg2dec,
      "sequence flags: %d, frame period: %d, frame rate: %d/%d",
      sequence->flags, sequence->frame_period, vinfo->fps_n, vinfo->fps_d);
  GST_DEBUG_OBJECT (mpeg2dec, "profile: %02x, colour_primaries: %d",
      sequence->profile_level_id, sequence->colour_primaries);
  GST_DEBUG_OBJECT (mpeg2dec, "transfer chars: %d, matrix coef: %d",
      sequence->transfer_characteristics, sequence->matrix_coefficients);
  GST_DEBUG_OBJECT (mpeg2dec,
      "FLAGS: CONSTRAINED_PARAMETERS:%d, PROGRESSIVE_SEQUENCE:%d",
      sequence->flags & SEQ_FLAG_CONSTRAINED_PARAMETERS,
      sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE);
  GST_DEBUG_OBJECT (mpeg2dec, "FLAGS: LOW_DELAY:%d, COLOUR_DESCRIPTION:%d",
      sequence->flags & SEQ_FLAG_LOW_DELAY,
      sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION);

  /* we store the codec size before cropping */
  *dinfo = *vinfo;
  gst_video_info_set_format (dinfo, format, sequence->width, sequence->height);

  /* Mpeg2dec has 2 frames of latency to produce a picture and 1 frame of
   * latency in its parser */
  latency = gst_util_uint64_scale (3 * GST_SECOND, vinfo->fps_d, vinfo->fps_n);
  gst_video_decoder_set_latency (GST_VIDEO_DECODER (mpeg2dec), latency,
      latency);

  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (mpeg2dec)))
    goto negotiation_fail;

  gst_video_codec_state_unref (state);

  mpeg2_custom_fbuf (mpeg2dec->decoder, 1);

  init_dummybuf (mpeg2dec);

  /* Pump in some null buffers, because otherwise libmpeg2 doesn't
   * initialise the discard_fbuf->id */
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  gst_mpeg2dec_clear_buffers (mpeg2dec);

  return ret;

invalid_frame_period:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Frame period is 0!");
    return GST_FLOW_ERROR;
  }
invalid_size:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Invalid frame dimensions: %d x %d",
        sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }

negotiation_fail:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Failed to negotiate with downstream");
    return GST_FLOW_ERROR;
  }
}
Code Example #19
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean processed = FALSE;
  GstBuffer *tmp;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps,
          &error))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;
    GstCaps *acquired_caps, *available_caps, *caps, *filter;
    GstStructure *st;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize.
     */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = gst_buffer_ref (frame->input_buffer);
      processed = TRUE;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    /* For decoders G_FMT returns coded size, G_SELECTION returns visible size
     * in the compose rectangle. gst_v4l2_object_acquire_format() checks both
     * and returns the visible size as width/height and the coded size as
     * padding. */
    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    /* Create caps from the acquired format, remove the format field */
    acquired_caps = gst_video_info_to_caps (&info);
    st = gst_caps_get_structure (acquired_caps, 0);
    gst_structure_remove_field (st, "format");

    /* Probe currently available pixel formats */
    available_caps = gst_v4l2_object_probe_caps (self->v4l2capture, NULL);
    available_caps = gst_caps_make_writable (available_caps);

    /* Replace coded size with visible size, we want to negotiate visible size
     * with downstream, not coded size. */
    gst_caps_map_in_place (available_caps, gst_v4l2_video_remove_padding, self);

    filter = gst_caps_intersect_full (available_caps, acquired_caps,
        GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (acquired_caps);
    gst_caps_unref (available_caps);
    caps = gst_pad_peer_query_caps (decoder->srcpad, filter);
    gst_caps_unref (filter);

    GST_DEBUG_OBJECT (self, "Possible decoded caps: %" GST_PTR_FORMAT, caps);
    if (gst_caps_is_empty (caps)) {
      gst_caps_unref (caps);
      goto not_negotiated;
    }

    /* Fixate pixel format */
    caps = gst_caps_fixate (caps);

    GST_DEBUG_OBJECT (self, "Chosen decoded caps: %" GST_PTR_FORMAT, caps);

    /* Try to set negotiated format, on success replace acquired format */
    if (gst_v4l2_object_set_format (self->v4l2capture, caps, &error))
      gst_video_info_from_caps (&info, caps);
    else
      gst_v4l2_clear_error (&error);
    gst_caps_unref (caps);

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information, there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task, when it quits, the task will disable input
     * processing to unlock input if draining, or prevent potential block */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (!processed) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
            pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }
  }

  /* No need to keep the input around */
  tmp = frame->input_buffer;
  frame->input_buffer = gst_buffer_new ();
  gst_buffer_copy_into (frame->input_buffer, tmp,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
      GST_BUFFER_COPY_META, 0, 0);
  gst_buffer_unref (tmp);

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    gst_v4l2_error (self, &error);
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }

start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("Maybe be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
Code Example #20
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstVideoCodecState *state, *ref_state;
  GstVideoInfo *vi;
  GstVideoFormat format = GST_VIDEO_FORMAT_I420;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  GstCapsFeatures *features = NULL;
  GstVaapiCapsFeature feature;

  feature =
      gst_vaapi_find_preferred_caps_feature (GST_VIDEO_DECODER_SRC_PAD (vdec),
      GST_VIDEO_INFO_FORMAT (&ref_state->info), &format);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

  switch (feature) {
#if (USE_GLX || USE_EGL)
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
      features =
          gst_caps_features_new
          (GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL);
      break;
#endif
#if GST_CHECK_VERSION(1,3,1)
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:
      features =
          gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE, NULL);
      break;
#endif
    default:
      break;
  }

  state = gst_video_decoder_set_output_state (vdec, format,
      ref_state->info.width, ref_state->info.height, ref_state);
  if (!state || state->info.width == 0 || state->info.height == 0)
    return FALSE;

  vi = &state->info;

  state->caps = gst_video_info_to_caps (vi);
  if (features)
    gst_caps_set_features (state->caps, 0, features);
  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  gint fps_n = GST_VIDEO_INFO_FPS_N (vi);
  gint fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
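As a quick check of the latency computed above: with the 25/1 fallback framerate, 2 * GST_SECOND * fps_d / fps_n works out to 80 ms. A minimal, hypothetical standalone sketch (not part of any plugin) that evaluates the same expression:

#include <gst/gst.h>

/* Sketch: evaluate the two-frame latency used above for a 25/1 framerate.
 * Prints "latency = 80000000 ns" (80 ms). */
int
main (int argc, char **argv)
{
  gint fps_n = 25, fps_d = 1;
  GstClockTime latency;

  gst_init (&argc, &argv);
  latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  g_print ("latency = %" G_GUINT64_FORMAT " ns\n", latency);
  return 0;
}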
Code example #21
static GstFlowReturn
gst_openjpeg_dec_negotiate (GstOpenJPEGDec * self, opj_image_t * image)
{
  GstVideoFormat format;
  gint width, height;

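  /* Fall back to the previously configured colorspace when the codestream
   * leaves it unknown or unspecified */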
  if (image->color_space == CLRSPC_UNKNOWN || image->color_space == 0)
    image->color_space = self->color_space;

  switch (image->color_space) {
    case CLRSPC_SRGB:
      if (image->numcomps == 4) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1 ||
            image->comps[1].dx != 1 || image->comps[1].dy != 1 ||
            image->comps[2].dx != 1 || image->comps[2].dy != 1 ||
            image->comps[3].dx != 1 || image->comps[3].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for RGB not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_packed8_4;
          format = GST_VIDEO_FORMAT_ARGB;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_packed16_4;
          format = GST_VIDEO_FORMAT_ARGB64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d", image->comps[3].prec);
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else if (image->numcomps == 3) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1 ||
            image->comps[1].dx != 1 || image->comps[1].dy != 1 ||
            image->comps[2].dx != 1 || image->comps[2].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for RGB not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_packed8_3;
          format = GST_VIDEO_FORMAT_ARGB;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_packed16_3;
          format = GST_VIDEO_FORMAT_ARGB64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of RGB components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    case CLRSPC_GRAY:
      if (image->numcomps == 1) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for GRAY not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_planar8_1;
          format = GST_VIDEO_FORMAT_GRAY8;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_planar16_1;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
          format = GST_VIDEO_FORMAT_GRAY16_LE;
#else
          format = GST_VIDEO_FORMAT_GRAY16_BE;
#endif
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of GRAY components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    case CLRSPC_SYCC:
      if (image->numcomps != 3 && image->numcomps != 4) {
        GST_ERROR_OBJECT (self, "Unsupported number of YUV components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->comps[0].dx != 1 || image->comps[0].dy != 1) {
        GST_ERROR_OBJECT (self, "Sub-sampling of luma plane not supported");
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->comps[1].dx != image->comps[2].dx ||
          image->comps[1].dy != image->comps[2].dy) {
        GST_ERROR_OBJECT (self,
            "Different sub-sampling of chroma planes not supported");
        return GST_FLOW_ERROR;
      }

      if (image->numcomps == 4) {
        if (image->comps[3].dx != 1 || image->comps[3].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling of alpha plane not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_planar8_4_generic;
          format = GST_VIDEO_FORMAT_AYUV;
        } else if (image->comps[3].prec <= 16) {
          self->fill_frame = fill_frame_planar16_4_generic;
          format = GST_VIDEO_FORMAT_AYUV64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d", image->comps[0].prec);
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else if (image->numcomps == 3) {
        if (get_highest_prec (image) == 8) {
          if (image->comps[1].dx == 1 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y444;
          } else if (image->comps[1].dx == 2 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y42B;
          } else if (image->comps[1].dx == 2 && image->comps[1].dy == 2) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_I420;
          } else if (image->comps[1].dx == 4 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y41B;
          } else if (image->comps[1].dx == 4 && image->comps[1].dy == 4) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_YUV9;
          } else {
            self->fill_frame = fill_frame_planar8_3_generic;
            format = GST_VIDEO_FORMAT_AYUV;
          }
        } else if (get_highest_prec (image) <= 16) {
          if (image->comps[0].prec == 10 &&
              image->comps[1].prec == 10 && image->comps[2].prec == 10) {
            if (image->comps[1].dx == 1 && image->comps[1].dy == 1) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_Y444_10LE;
#else
              format = GST_VIDEO_FORMAT_Y444_10BE;
#endif
            } else if (image->comps[1].dx == 2 && image->comps[1].dy == 1) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_I422_10LE;
#else
              format = GST_VIDEO_FORMAT_I422_10BE;
#endif
            } else if (image->comps[1].dx == 2 && image->comps[1].dy == 2) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_I420_10LE;
#else
              format = GST_VIDEO_FORMAT_I420_10BE;
#endif
            } else {
              self->fill_frame = fill_frame_planar16_3_generic;
              format = GST_VIDEO_FORMAT_AYUV64;
            }
          } else {
            self->fill_frame = fill_frame_planar16_3_generic;
            format = GST_VIDEO_FORMAT_AYUV64;
          }
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of YUV components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    default:
      GST_ERROR_OBJECT (self, "Unsupported colorspace %d", image->color_space);
      return GST_FLOW_NOT_NEGOTIATED;
  }

  width = image->x1 - image->x0;
  height = image->y1 - image->y0;

  if (!self->output_state ||
      self->output_state->info.finfo->format != format ||
      self->output_state->info.width != width ||
      self->output_state->info.height != height) {
    if (self->output_state)
      gst_video_codec_state_unref (self->output_state);
    self->output_state =
        gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self), format,
        width, height, self->input_state);

    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self)))
      return GST_FLOW_NOT_NEGOTIATED;
  }

  return GST_FLOW_OK;
}
Code example #22
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstPad *const srcpad = GST_VIDEO_DECODER_SRC_PAD (vdec);
  GstCaps *allowed;
  GstVideoCodecState *state, *ref_state;
  GstVaapiCapsFeature feature;
  GstCapsFeatures *features;
  GstCaps *allocation_caps;
  GstVideoInfo *vi;
  GstVideoFormat format;
  GstClockTime latency;
  gint fps_d, fps_n;
  guint width, height;
  const gchar *format_str, *feature_str;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  format = GST_VIDEO_INFO_FORMAT (&decode->decoded_info);
  allowed = gst_vaapidecode_get_allowed_srcpad_caps (decode);
  feature = gst_vaapi_find_preferred_caps_feature (srcpad, allowed, &format);
  gst_caps_unref (allowed);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

#if (!USE_GLX && !USE_EGL)
  /* This is a very pathological situation. Should not happen. */
  if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
    return FALSE;
#endif

  if ((feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY ||
          feature == GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      && format != GST_VIDEO_INFO_FORMAT (&decode->decoded_info)) {
    GST_FIXME_OBJECT (decode, "validate if driver can convert from %s to %s",
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT
            (&decode->decoded_info)), gst_video_format_to_string (format));
  }

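  /* Prefer the display (cropped) dimensions when known, otherwise fall back
   * to the dimensions of the input state */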
  width = decode->display_width;
  height = decode->display_height;

  if (!width || !height) {
    width = GST_VIDEO_INFO_WIDTH (&ref_state->info);
    height = GST_VIDEO_INFO_HEIGHT (&ref_state->info);
  }

  state = gst_video_decoder_set_output_state (vdec, format, width, height,
      ref_state);
  if (!state)
    return FALSE;

  if (GST_VIDEO_INFO_WIDTH (&state->info) == 0
      || GST_VIDEO_INFO_HEIGHT (&state->info) == 0) {
    gst_video_codec_state_unref (state);
    return FALSE;
  }

  vi = &state->info;
  state->caps = gst_video_info_to_caps (vi);

  switch (feature) {
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:{
      GstStructure *structure = gst_caps_get_structure (state->caps, 0);

      /* Remove chroma-site and colorimetry from the src caps;
       * they are unnecessary downstream when using VASurface
       */
      gst_structure_remove_fields (structure, "chroma-site", "colorimetry",
          NULL);

      feature_str = gst_vaapi_caps_feature_to_string (feature);
      features = gst_caps_features_new (feature_str, NULL);
      gst_caps_set_features (state->caps, 0, features);
      break;
    }
    default:
      break;
  }

  /* The allocation query may use caps different from the pad's caps */
  allocation_caps = NULL;
  if (GST_VIDEO_INFO_WIDTH (&decode->decoded_info) != width
      || GST_VIDEO_INFO_HEIGHT (&decode->decoded_info) != height) {
    allocation_caps = gst_caps_copy (state->caps);
    format_str = gst_video_format_to_string (format);
    gst_caps_set_simple (allocation_caps,
        "width", G_TYPE_INT, GST_VIDEO_INFO_WIDTH (&decode->decoded_info),
        "height", G_TYPE_INT, GST_VIDEO_INFO_HEIGHT (&decode->decoded_info),
        "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (decode, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
  }
  gst_caps_replace (&state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  fps_n = GST_VIDEO_INFO_FPS_N (vi);
  fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
Code example #23
File: gstpngdec.c  Project: DylanZA/gst-plugins-good
static GstFlowReturn
gst_pngdec_caps_create_and_set (GstPngDec * pngdec)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint bpc = 0, color_type;
  png_uint_32 width, height;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;

  g_return_val_if_fail (GST_IS_PNGDEC (pngdec), GST_FLOW_ERROR);

  /* Get bits per channel */
  bpc = png_get_bit_depth (pngdec->png, pngdec->info);

  /* Get Color type */
  color_type = png_get_color_type (pngdec->png, pngdec->info);

  /* Add alpha channel if 16-bit depth, but not for GRAY images */
  if ((bpc > 8) && (color_type != PNG_COLOR_TYPE_GRAY)) {
    png_set_add_alpha (pngdec->png, 0xffff, PNG_FILLER_BEFORE);
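    /* libpng emits 16-bit samples in big-endian order; swap to little-endian */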
    png_set_swap (pngdec->png);
  }
#if 0
  /* We used to have this HACK to reverse the outgoing bytes, but the problem
   * that originally required the hack seems to have been in videoconvert's
   * RGBA descriptions. It doesn't seem needed now that's fixed, but might
   * still be needed on big-endian systems, I'm not sure. J.S. 6/7/2007 */
  if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
    png_set_bgr (pngdec->png);
#endif

  /* Gray scale with alpha channel converted to RGB */
  if (color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
    GST_LOG_OBJECT (pngdec,
        "converting grayscale png with alpha channel to RGB");
    png_set_gray_to_rgb (pngdec->png);
  }

  /* Gray scale upscaled to 8 bits */
  if ((color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
      (color_type == PNG_COLOR_TYPE_GRAY)) {
    if (bpc < 8) {              /* Convert to 8 bits */
      GST_LOG_OBJECT (pngdec, "converting grayscale image to 8 bits");
#if PNG_LIBPNG_VER < 10400
      png_set_gray_1_2_4_to_8 (pngdec->png);
#else
      png_set_expand_gray_1_2_4_to_8 (pngdec->png);
#endif
    }
  }

  /* Palette converted to RGB */
  if (color_type == PNG_COLOR_TYPE_PALETTE) {
    GST_LOG_OBJECT (pngdec, "converting palette png to RGB");
    png_set_palette_to_rgb (pngdec->png);
  }

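  /* Let libpng take care of de-interlacing Adam7-interlaced images */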
  png_set_interlace_handling (pngdec->png);

  /* Update the info structure */
  png_read_update_info (pngdec->png, pngdec->info);

  /* Get IHDR header again after transformation settings */
  png_get_IHDR (pngdec->png, pngdec->info, &width, &height,
      &bpc, &pngdec->color_type, NULL, NULL, NULL);

  GST_LOG_OBJECT (pngdec, "this is a %dx%d PNG image", (gint) width,
      (gint) height);

  switch (pngdec->color_type) {
    case PNG_COLOR_TYPE_RGB:
      GST_LOG_OBJECT (pngdec, "we have no alpha channel, depth is 24 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGB;
      break;
    case PNG_COLOR_TYPE_RGB_ALPHA:
      GST_LOG_OBJECT (pngdec,
          "we have an alpha channel, depth is 32 or 64 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGBA;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_ARGB64;
      break;
    case PNG_COLOR_TYPE_GRAY:
      GST_LOG_OBJECT (pngdec,
          "We have an gray image, depth is 8 or 16 (be) bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_GRAY8;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_GRAY16_BE;
      break;
    default:
      break;
  }

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ELEMENT_ERROR (pngdec, STREAM, NOT_IMPLEMENTED, (NULL),
        ("pngdec does not support this color type"));
    ret = GST_FLOW_NOT_SUPPORTED;
    goto beach;
  }

  /* Check if output state changed */
  if (pngdec->output_state) {
    GstVideoInfo *info = &pngdec->output_state->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        GST_VIDEO_INFO_FORMAT (info) == format) {
      goto beach;
    }
    gst_video_codec_state_unref (pngdec->output_state);
  }

  pngdec->output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (pngdec), format,
      width, height, pngdec->input_state);
  gst_video_decoder_negotiate (GST_VIDEO_DECODER (pngdec));
  GST_DEBUG ("Final %d %d", GST_VIDEO_INFO_WIDTH (&pngdec->output_state->info),
      GST_VIDEO_INFO_HEIGHT (&pngdec->output_state->info));

beach:
  return ret;
}
Code example #24
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize.
     */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
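      /* Without codec_data, consume the first input frame itself as the
       * stream headers */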
      codec_data = frame->input_buffer;
      frame->input_buffer = NULL;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information; there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task. When it quits, the task will disable input
     * processing to unlock the input when draining, or to prevent a
     * potential deadlock */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (frame->input_buffer) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
            pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }

    /* No need to keep input around */
    gst_buffer_replace (&frame->input_buffer, NULL);
  }

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }

start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("Maybe be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
Code example #25
static gboolean
gst_msdkdec_set_src_caps (GstMsdkDec * thiz, gboolean need_allocation)
{
  GstVideoCodecState *output_state;
  GstVideoInfo *vinfo;
  GstVideoAlignment align;
  GstCaps *allocation_caps = NULL;
  GstVideoFormat format;
  guint width, height;
  const gchar *format_str;

  /* Use the display width and height in the output state, which
   * will be used for caps negotiation */
  width =
      thiz->param.mfx.FrameInfo.CropW ? thiz->param.mfx.
      FrameInfo.CropW : GST_VIDEO_INFO_WIDTH (&thiz->input_state->info);
  height =
      thiz->param.mfx.FrameInfo.CropH ? thiz->param.mfx.
      FrameInfo.CropH : GST_VIDEO_INFO_HEIGHT (&thiz->input_state->info);

  format =
      gst_msdk_get_video_format_from_mfx_fourcc (thiz->param.mfx.
      FrameInfo.FourCC);

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_WARNING_OBJECT (thiz, "Failed to find a valid video format");
    return FALSE;
  }

  output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (thiz),
      format, width, height, thiz->input_state);
  if (!output_state)
    return FALSE;

  /* Ensure output_state->caps and info has same width and height
   * Also mandate the 32 bit alignment */
  vinfo = &output_state->info;
  gst_msdk_set_video_alignment (vinfo, &align);
  gst_video_info_align (vinfo, &align);
  output_state->caps = gst_video_info_to_caps (vinfo);
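  /* Advertise DMABuf memory in the caps when downstream can import it */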
  if (srcpad_can_dmabuf (thiz))
    gst_caps_set_features (output_state->caps, 0,
        gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DMABUF, NULL));
  thiz->output_info = output_state->info;

  if (need_allocation) {
    /* Find allocation width and height */
    width =
        GST_ROUND_UP_16 (thiz->param.mfx.FrameInfo.Width ? thiz->param.mfx.
        FrameInfo.Width : GST_VIDEO_INFO_WIDTH (&output_state->info));
    height =
        GST_ROUND_UP_32 (thiz->param.mfx.FrameInfo.Height ? thiz->param.mfx.
        FrameInfo.Height : GST_VIDEO_INFO_HEIGHT (&output_state->info));

    /* Set the allocation width and height in allocation_caps,
     * which may or may not match the output_state caps */
    allocation_caps = gst_caps_copy (output_state->caps);
    format_str =
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (&thiz->output_info));
    gst_caps_set_simple (allocation_caps, "width", G_TYPE_INT, width, "height",
        G_TYPE_INT, height, "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (thiz, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
    gst_caps_replace (&thiz->allocation_caps, allocation_caps);
  } else {
    /* Keep the allocation parameters as they are to avoid pool renegotiation.
     * For codecs like VP9, a dynamic resolution change doesn't require an
     * allocation reset if the new frame resolution is lower than the
     * already configured one */
    allocation_caps = gst_caps_copy (thiz->allocation_caps);
  }

  gst_caps_replace (&output_state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  gst_video_codec_state_unref (output_state);
  return TRUE;
}
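gst_msdk_set_video_alignment() is a helper defined elsewhere in the plugin; it is presumably responsible for the padding implied by the GST_ROUND_UP_16/GST_ROUND_UP_32 allocation sizes above. A hypothetical stand-in, for illustration only:

/* Hypothetical sketch only: pad each frame so the width is a multiple of
 * 16 and the height a multiple of 32, matching the allocation rounding
 * above. The real helper may also set strides and plane offsets. */
static void
my_msdk_set_video_alignment (GstVideoInfo * info, GstVideoAlignment * align)
{
  gst_video_alignment_reset (align);
  align->padding_right =
      GST_ROUND_UP_16 (GST_VIDEO_INFO_WIDTH (info)) -
      GST_VIDEO_INFO_WIDTH (info);
  align->padding_bottom =
      GST_ROUND_UP_32 (GST_VIDEO_INFO_HEIGHT (info)) -
      GST_VIDEO_INFO_HEIGHT (info);
}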
Code example #26
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1.
   */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.aspect_numerator != 0 && dec->info.aspect_denominator != 0)) {
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }
  /* theora has:
   *
   *  width/height : dimension of the encoded frame 
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts
   */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  switch (dec->info.pixel_fmt) {
    case TH_PF_420:
      fmt = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      fmt = GST_VIDEO_FORMAT_Y42B;
      break;
    case TH_PF_444:
      fmt = GST_VIDEO_FORMAT_Y444;
      break;
    default:
      goto unsupported_format;
  }

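  /* Use the visible (pic_*) dimensions for the output, not the full
   * encoded-frame size */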
  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* Ensure correct offsets in chroma for formats that need it
   * by rounding the offset. libtheora will add proper pixels,
   * so no need to handle them ourselves. */
  if (dec->info.pic_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
    GST_VIDEO_INFO_WIDTH (info)++;
  }
  if (dec->info.pic_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
    GST_VIDEO_INFO_HEIGHT (info)++;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      info->width, info->height, dec->info.pic_x, dec->info.pic_y);

  /* Headers parsed; create the decoder instance */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference input stream ? */
  state->info.fps_n = dec->info.fps_numerator;
  state->info.fps_d = dec->info.fps_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  /* these values are for all versions of the colorspace specified in the
   * theora info */
  state->info.chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
  state->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
  state->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
  state->info.colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
  switch (dec->info.colorspace) {
    case TH_CS_ITU_REC_470M:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
      break;
    case TH_CS_ITU_REC_470BG:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
      break;
    default:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }
}
Code example #27
static GstFlowReturn
gst_vp9_dec_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
{
  GstVP9Dec *dec;
  GstFlowReturn ret = GST_FLOW_OK;
  vpx_codec_err_t status;
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;
  long decoder_deadline = 0;
  GstClockTimeDiff deadline;
  GstMapInfo minfo;

  GST_DEBUG_OBJECT (decoder, "handle_frame");

  dec = GST_VP9_DEC (decoder);

  if (!dec->decoder_inited) {
    ret = open_codec (dec, frame);
    if (ret == GST_FLOW_CUSTOM_SUCCESS_1)
      return GST_FLOW_OK;
    else if (ret != GST_FLOW_OK)
      return ret;
  }

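  /* Map the remaining decode time onto libvpx's deadline argument: 1 asks
   * for the fastest possible decode (we are already late), 0 means no
   * deadline (best quality) */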
  deadline = gst_video_decoder_get_max_decode_time (decoder, frame);
  if (deadline < 0) {
    decoder_deadline = 1;
  } else if (deadline == G_MAXINT64) {
    decoder_deadline = 0;
  } else {
    decoder_deadline = MAX (1, deadline / GST_MSECOND);
  }

  if (!gst_buffer_map (frame->input_buffer, &minfo, GST_MAP_READ)) {
    GST_ERROR_OBJECT (dec, "Failed to map input buffer");
    return GST_FLOW_ERROR;
  }

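  /* Feed the whole compressed frame to libvpx; decoded images are pulled
   * out below with vpx_codec_get_frame() */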
  status = vpx_codec_decode (&dec->decoder,
      minfo.data, minfo.size, NULL, decoder_deadline);

  gst_buffer_unmap (frame->input_buffer, &minfo);

  if (status) {
    GST_VIDEO_DECODER_ERROR (decoder, 1, LIBRARY, ENCODE,
        ("Failed to decode frame"), ("%s", gst_vpx_error_name (status)), ret);
    return ret;
  }

  img = vpx_codec_get_frame (&dec->decoder, &iter);
  if (img) {
    GstVideoFormat fmt;

    switch (img->fmt) {
      case VPX_IMG_FMT_I420:
        fmt = GST_VIDEO_FORMAT_I420;
        break;
      case VPX_IMG_FMT_YV12:
        fmt = GST_VIDEO_FORMAT_YV12;
        break;
      case VPX_IMG_FMT_I422:
        fmt = GST_VIDEO_FORMAT_Y42B;
        break;
      case VPX_IMG_FMT_I444:
        fmt = GST_VIDEO_FORMAT_Y444;
        break;
      default:
        vpx_img_free (img);
        GST_ELEMENT_ERROR (decoder, LIBRARY, ENCODE,
            ("Failed to decode frame"), ("Unsupported color format %d",
                img->fmt));
        return GST_FLOW_ERROR;
        break;
    }

    if (!dec->output_state || dec->output_state->info.finfo->format != fmt ||
        dec->output_state->info.width != img->d_w ||
        dec->output_state->info.height != img->d_h) {
      gboolean send_tags = !dec->output_state;

      if (dec->output_state)
        gst_video_codec_state_unref (dec->output_state);

      dec->output_state =
          gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec),
          fmt, img->d_w, img->d_h, dec->input_state);
      gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

      if (send_tags)
        gst_vp9_dec_send_tags (dec);
    }

    if (deadline < 0) {
      GST_LOG_OBJECT (dec, "Skipping late frame (%f s past deadline)",
          (double) -deadline / GST_SECOND);
      gst_video_decoder_drop_frame (decoder, frame);
    } else {
      ret = gst_video_decoder_allocate_output_frame (decoder, frame);

      if (ret == GST_FLOW_OK) {
        gst_vp9_dec_image_to_buffer (dec, img, frame->output_buffer);
        ret = gst_video_decoder_finish_frame (decoder, frame);
      } else {
        gst_video_decoder_drop_frame (decoder, frame);
      }
    }

    vpx_img_free (img);

    while ((img = vpx_codec_get_frame (&dec->decoder, &iter))) {
      GST_WARNING_OBJECT (decoder, "Multiple decoded frames... dropping");
      vpx_img_free (img);
    }
  } else {
    /* Invisible frame */
    GST_VIDEO_CODEC_FRAME_SET_DECODE_ONLY (frame);
    gst_video_decoder_finish_frame (decoder, frame);
  }

  return ret;
}