Code example #1
File: vtdec.c Project: shakin/gst-plugins-bad
static void
gst_vtdec_session_output_callback (void *decompression_output_ref_con,
    void *source_frame_ref_con, OSStatus status, VTDecodeInfoFlags info_flags,
    CVImageBufferRef image_buffer, CMTime pts, CMTime duration)
{
  GstVtdec *vtdec = (GstVtdec *) decompression_output_ref_con;
  GstVideoCodecFrame *frame = (GstVideoCodecFrame *) source_frame_ref_con;
  GstBuffer *buf;
  GstVideoCodecState *state;

  GST_LOG_OBJECT (vtdec, "got output frame %p %d and VT buffer %p", frame,
      frame->decode_frame_number, image_buffer);

  if (status != noErr) {
    GST_ERROR_OBJECT (vtdec, "Error decoding frame %d", (int) status);
    goto drop;
  }

  if (image_buffer == NULL) {
    if (info_flags & kVTDecodeInfo_FrameDropped)
      GST_DEBUG_OBJECT (vtdec, "Frame dropped by video toolbox");
    else
      GST_DEBUG_OBJECT (vtdec, "Decoded frame is NULL");
    goto drop;
  }

  /* FIXME: use gst_video_decoder_allocate_output_buffer */
  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (state == NULL) {
    GST_WARNING_OBJECT (vtdec, "Output state not configured, release buffer");
    /* release as this usually means that the baseclass isn't ready to do
     * the QoS that _drop requires and will lead to an assertion with the
     * segment.format being undefined */
    goto release;
  }
  buf =
      gst_core_video_buffer_new (image_buffer, &state->info,
      vtdec->texture_cache == NULL);
  gst_video_codec_state_unref (state);

  GST_BUFFER_PTS (buf) = pts.value;
  GST_BUFFER_DURATION (buf) = duration.value;
  frame->output_buffer = buf;
  g_async_queue_push_sorted (vtdec->reorder_queue, frame,
      sort_frames_by_pts, NULL);

  return;

drop:
  GST_WARNING_OBJECT (vtdec, "Frame dropped %p %d", frame,
      frame->decode_frame_number);
  gst_video_decoder_drop_frame (GST_VIDEO_DECODER (vtdec), frame);
  return;

release:
  GST_WARNING_OBJECT (vtdec, "Frame released %p %d", frame,
      frame->decode_frame_number);
  gst_video_decoder_release_frame (GST_VIDEO_DECODER (vtdec), frame);
  return;
}
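Example #1 (and #10 below) pushes decoded frames into vtdec->reorder_queue with a sort_frames_by_pts comparator whose definition is not shown. A minimal sketch of what such a GCompareDataFunc could look like (illustrative only; the real sort_frames_by_pts in vtdec.c may differ):

/* Illustrative comparator: order GstVideoCodecFrames by presentation
 * timestamp so the reorder queue hands frames out in PTS order. */
static gint
sort_frames_by_pts (gconstpointer f1, gconstpointer f2, gpointer user_data)
{
  const GstVideoCodecFrame *frame1 = f1;
  const GstVideoCodecFrame *frame2 = f2;

  if (frame1->pts == frame2->pts)
    return 0;
  return frame1->pts < frame2->pts ? -1 : 1;
}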
Code example #2
File: gstmpeg2dec.c Project: PeterXu/gst-mobile
static GstFlowReturn
gst_mpeg2dec_alloc_sized_buf (GstMpeg2dec * mpeg2dec, guint size,
    GstVideoCodecFrame * frame, GstBuffer ** buffer)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (mpeg2dec));

  if (!mpeg2dec->need_cropping || mpeg2dec->has_cropping) {
    /* need parsed input, but that might be slightly bogus,
     * so avoid giving up altogether and mark it as error */
    if (frame->output_buffer) {
      gst_buffer_replace (&frame->output_buffer, NULL);
      GST_VIDEO_DECODER_ERROR (mpeg2dec, 1, STREAM, DECODE,
          ("decoding error"), ("Input not correctly parsed"), ret);
    }
    ret =
        gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (mpeg2dec),
        frame);
    *buffer = frame->output_buffer;
  } else {
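    /* GstAllocationParams fields are (flags, align, prefix, padding);
     * align = 15 requests 16-byte aligned memory */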
    GstAllocationParams params = { 0, 15, 0, 0 };

    *buffer = gst_buffer_new_allocate (NULL, size, &params);
    gst_video_codec_frame_set_user_data (frame, *buffer,
        (GDestroyNotify) frame_user_data_destroy_notify);
  }

  gst_video_codec_state_unref (state);

  return ret;
}
Code example #3
static gboolean gst_openh264dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstVideoCodecState *state;
  GstBufferPool *pool;
  guint size, min, max;
  GstStructure *config;

  if (!GST_VIDEO_DECODER_CLASS (gst_openh264dec_parent_class)->decide_allocation (decoder,
          query))
    return FALSE;

  state = gst_video_decoder_get_output_state (decoder);

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);

  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
  }

  gst_buffer_pool_set_config (pool, config);

  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);

  gst_object_unref (pool);
  gst_video_codec_state_unref (state);

  return TRUE;
}
Code example #4
File: gstmpeg2dec.c Project: PeterXu/gst-mobile
static GstFlowReturn
gst_mpeg2dec_alloc_buffer (GstMpeg2dec * mpeg2dec, GstVideoCodecFrame * frame,
    GstBuffer ** buffer)
{
  GstFlowReturn ret;
  GstVideoFrame vframe;
  guint8 *buf[3];

  ret =
      gst_mpeg2dec_alloc_sized_buf (mpeg2dec, mpeg2dec->decoded_info.size,
      frame, buffer);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto beach;

  if (mpeg2dec->need_cropping && mpeg2dec->has_cropping) {
    GstVideoCropMeta *crop;
    GstVideoCodecState *state;
    GstVideoInfo *vinfo;

    state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (mpeg2dec));
    vinfo = &state->info;

    crop = gst_buffer_add_video_crop_meta (frame->output_buffer);
    /* we can do things slightly more efficiently when we know that
     * downstream understands clipping */
    crop->x = 0;
    crop->y = 0;
    crop->width = vinfo->width;
    crop->height = vinfo->height;

    gst_video_codec_state_unref (state);
  }

  if (!gst_video_frame_map (&vframe, &mpeg2dec->decoded_info, *buffer,
          GST_MAP_READ | GST_MAP_WRITE))
    goto map_fail;

  buf[0] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0);
  buf[1] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 1);
  buf[2] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 2);

  GST_DEBUG_OBJECT (mpeg2dec, "set_buf: %p %p %p, frame %i",
      buf[0], buf[1], buf[2], frame->system_frame_number);

  /* Note: We use a non-null 'id' value to make the distinction
   * between the dummy buffers (which have an id of NULL) and the
   * ones we did produce */
  mpeg2_set_buf (mpeg2dec->decoder, buf,
      GINT_TO_POINTER (frame->system_frame_number + 1));
  gst_mpeg2dec_save_buffer (mpeg2dec, frame->system_frame_number, &vframe);

beach:
  return ret;

map_fail:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Failed to map frame");
    return GST_FLOW_ERROR;
  }
}
Code example #5
/* check whether display resolution changed */
static gboolean
is_display_resolution_changed (GstVaapiDecode * decode,
    const GstVaapiRectangle * crop_rect)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstVideoCodecState *state;
  guint display_width, display_height;
  guint negotiated_width, negotiated_height;

  display_width = GST_VIDEO_INFO_WIDTH (&decode->decoded_info);
  display_height = GST_VIDEO_INFO_HEIGHT (&decode->decoded_info);
  if (crop_rect) {
    display_width = crop_rect->width;
    display_height = crop_rect->height;
  }

  state = gst_video_decoder_get_output_state (vdec);
  if (G_UNLIKELY (!state))
    goto set_display_res;

  negotiated_width = GST_VIDEO_INFO_WIDTH (&state->info);
  negotiated_height = GST_VIDEO_INFO_HEIGHT (&state->info);
  gst_video_codec_state_unref (state);

  if ((display_width == negotiated_width && display_height == negotiated_height)
      && (decode->display_width == negotiated_width
          && decode->display_height == negotiated_height))
    return FALSE;

set_display_res:
  decode->display_width = display_width;
  decode->display_height = display_height;
  return TRUE;
}
Code example #6
static GstFlowReturn
gst_mpeg2dec_crop_buffer (GstMpeg2dec * dec, GstVideoCodecFrame * in_frame,
    GstVideoFrame * input_vframe)
{
  GstVideoCodecState *state;
  GstVideoInfo *info;
  GstVideoInfo *dinfo;
  GstVideoFrame output_frame;
  GstFlowReturn ret;
  GstBuffer *buffer = NULL;

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  info = &state->info;
  dinfo = &dec->decoded_info;

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
      "Copying input buffer %ux%u (%" G_GSIZE_FORMAT ") to output buffer "
      "%ux%u (%" G_GSIZE_FORMAT ")", dinfo->width, dinfo->height,
      dinfo->size, info->width, info->height, info->size);

  ret = gst_buffer_pool_acquire_buffer (dec->downstream_pool, &buffer, NULL);
  if (ret != GST_FLOW_OK)
    goto beach;

  if (!gst_video_frame_map (&output_frame, info, buffer, GST_MAP_WRITE))
    goto map_fail;

  if (in_frame->output_buffer)
    gst_buffer_unref (in_frame->output_buffer);
  in_frame->output_buffer = buffer;

  if (!gst_video_frame_copy (&output_frame, input_vframe))
    goto copy_failed;

  gst_video_frame_unmap (&output_frame);

  GST_BUFFER_FLAGS (in_frame->output_buffer) =
      GST_BUFFER_FLAGS (input_vframe->buffer);

beach:
  gst_video_codec_state_unref (state);

  return ret;

map_fail:
  {
    GST_ERROR_OBJECT (dec, "Failed to map output frame");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }

copy_failed:
  {
    GST_ERROR_OBJECT (dec, "Failed to copy output frame");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
Code example #7
File: gstjpegdec.c Project: Lachann/gst-plugins-good
static void
gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc)
{
  GstVideoCodecState *outstate;
  GstVideoInfo *info;
  GstVideoFormat format;

  switch (clrspc) {
    case JCS_RGB:
      format = GST_VIDEO_FORMAT_RGB;
      break;
    case JCS_GRAYSCALE:
      format = GST_VIDEO_FORMAT_GRAY8;
      break;
    default:
      format = GST_VIDEO_FORMAT_I420;
      break;
  }

  /* Compare to currently configured output state */
  outstate = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  if (outstate) {
    info = &outstate->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        format == GST_VIDEO_INFO_FORMAT (info)) {
      gst_video_codec_state_unref (outstate);
      return;
    }
    gst_video_codec_state_unref (outstate);
  }

  outstate =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
      width, height, dec->input_state);

  switch (clrspc) {
    case JCS_RGB:
    case JCS_GRAYSCALE:
      break;
    default:
      outstate->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255;
      outstate->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      outstate->info.colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
      outstate->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_codec_state_unref (outstate);

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor);
  GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor);
}
Code example #8
File: vtdec.c Project: luisbg/gst-plugins-bad
static void
setup_texture_cache (GstVtdec * vtdec, GstGLContext * context)
{
  GstVideoCodecState *output_state;

  g_return_if_fail (vtdec->texture_cache == NULL);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  vtdec->texture_cache = gst_video_texture_cache_new (context);
  gst_video_texture_cache_set_format (vtdec->texture_cache,
      GST_VIDEO_FORMAT_NV12, output_state->caps);
  gst_video_codec_state_unref (output_state);
}
Code example #9
static gboolean
theora_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstTheoraDec *dec = GST_THEORA_DEC (decoder);
  GstVideoCodecState *state;
  GstBufferPool *pool;
  guint size, min, max;
  GstStructure *config;

  if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
          query))
    return FALSE;

  state = gst_video_decoder_get_output_state (decoder);

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);

  dec->can_crop = FALSE;
  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
    dec->can_crop =
        gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE,
        NULL);
  }

  if (dec->can_crop) {
    GstVideoInfo info = state->info;
    GstCaps *caps;

    /* Calculate uncropped size */
    gst_video_info_set_format (&info, info.finfo->format, dec->info.frame_width,
        dec->info.frame_height);
    size = MAX (size, info.size);
    caps = gst_video_info_to_caps (&info);
    gst_buffer_pool_config_set_params (config, caps, size, min, max);
    gst_caps_unref (caps);
  }

  gst_buffer_pool_set_config (pool, config);

  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);

  gst_object_unref (pool);
  gst_video_codec_state_unref (state);

  return TRUE;
}
Code example #10
File: vtdec.c Project: luisbg/gst-plugins-bad
static void
gst_vtdec_session_output_callback (void *decompression_output_ref_con,
    void *source_frame_ref_con, OSStatus status, VTDecodeInfoFlags info_flags,
    CVImageBufferRef image_buffer, CMTime pts, CMTime duration)
{
  GstVtdec *vtdec = (GstVtdec *) decompression_output_ref_con;
  GstVideoCodecFrame *frame = (GstVideoCodecFrame *) source_frame_ref_con;
  GstVideoCodecState *state;

  GST_LOG_OBJECT (vtdec, "got output frame %p %d and VT buffer %p", frame,
      frame->decode_frame_number, image_buffer);

  frame->output_buffer = NULL;

  if (status != noErr) {
    GST_ERROR_OBJECT (vtdec, "Error decoding frame %d", (int) status);
  }

  if (image_buffer) {
    GstBuffer *buf = NULL;

    /* FIXME: use gst_video_decoder_allocate_output_buffer */
    state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
    if (state == NULL) {
      GST_WARNING_OBJECT (vtdec, "Output state not configured, release buffer");
      frame->flags |= VTDEC_FRAME_FLAG_SKIP;
    } else {
      buf =
          gst_core_video_buffer_new (image_buffer, &state->info,
          vtdec->texture_cache);
      gst_video_codec_state_unref (state);
      GST_BUFFER_PTS (buf) = pts.value;
      GST_BUFFER_DURATION (buf) = duration.value;
      frame->output_buffer = buf;
    }
  } else {
    if (info_flags & kVTDecodeInfo_FrameDropped) {
      GST_DEBUG_OBJECT (vtdec, "Frame dropped by video toolbox %p %d",
          frame, frame->decode_frame_number);
      frame->flags |= VTDEC_FRAME_FLAG_DROP;
    } else {
      GST_DEBUG_OBJECT (vtdec, "Decoded frame is NULL");
      frame->flags |= VTDEC_FRAME_FLAG_SKIP;
    }
  }

  g_async_queue_push_sorted (vtdec->reorder_queue, frame,
      sort_frames_by_pts, NULL);
}
Code example #11
static gboolean
gst_amc_video_dec_fill_buffer (GstAmcVideoDec * self, GstAmcBuffer * buf,
    const GstAmcBufferInfo * buffer_info, GstBuffer * outbuf)
{
  GstVideoCodecState *state =
      gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));
  GstVideoInfo *info = &state->info;
  gboolean ret = FALSE;

  ret =
      gst_amc_color_format_copy (&self->color_format_info, buf, buffer_info,
      info, outbuf, COLOR_FORMAT_COPY_OUT);

  gst_video_codec_state_unref (state);
  return ret;
}
Code example #12
File: gstjpegdec.c Project: an146/gst-plugins-good
static void
gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc)
{
  GstVideoCodecState *outstate;
  GstVideoInfo *info;
  GstVideoFormat format;

  switch (clrspc) {
    case JCS_RGB:
      format = GST_VIDEO_FORMAT_RGB;
      break;
    case JCS_GRAYSCALE:
      format = GST_VIDEO_FORMAT_GRAY8;
      break;
    default:
      format = GST_VIDEO_FORMAT_I420;
      break;
  }

  /* Compare to currently configured output state */
  outstate = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  if (outstate) {
    info = &outstate->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        format == GST_VIDEO_INFO_FORMAT (info)) {
      gst_video_codec_state_unref (outstate);
      return;
    }
    gst_video_codec_state_unref (outstate);
  }

  outstate =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
      width, height, dec->input_state);

  gst_video_codec_state_unref (outstate);

  GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor);
  GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor);
}
Code example #13
static gboolean
is_surface_resolution_changed (GstVideoDecoder *vdec, GstVaapiSurface *surface)
{
  guint surface_width, surface_height;
  guint configured_width, configured_height;
  GstVideoCodecState *state;
  gboolean ret = FALSE;

  gst_vaapi_surface_get_size(surface, &surface_width, &surface_height);

  state = gst_video_decoder_get_output_state (vdec);
  configured_width = GST_VIDEO_INFO_WIDTH (&state->info);
  configured_height = GST_VIDEO_INFO_HEIGHT (&state->info);
  gst_video_codec_state_unref (state);

  if (surface_width != configured_width || surface_height != configured_height)
    ret = TRUE;

  return ret;
}
Code example #14
File: gstjpegdec.c Project: an146/gst-plugins-good
static GstFlowReturn
gst_jpeg_dec_handle_frame (GstVideoDecoder * bdec, GstVideoCodecFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstJpegDec *dec = (GstJpegDec *) bdec;
  GstVideoFrame vframe;
  gint width, height;
  gint r_h, r_v;
  guint code, hdr_ok;
  gboolean need_unmap = TRUE;
  GstVideoCodecState *state = NULL;

  dec->current_frame = frame;
  gst_buffer_map (frame->input_buffer, &dec->current_frame_map, GST_MAP_READ);
  gst_jpeg_dec_fill_input_buffer (&dec->cinfo);

  if (setjmp (dec->jerr.setjmp_buffer)) {
    code = dec->jerr.pub.msg_code;

    if (code == JERR_INPUT_EOF) {
      GST_DEBUG ("jpeg input EOF error, we probably need more data");
      goto need_more_data;
    }
    goto decode_error;
  }

  /* read header */
  hdr_ok = jpeg_read_header (&dec->cinfo, TRUE);
  if (G_UNLIKELY (hdr_ok != JPEG_HEADER_OK)) {
    GST_WARNING_OBJECT (dec, "reading the header failed, %d", hdr_ok);
  }

  GST_LOG_OBJECT (dec, "num_components=%d", dec->cinfo.num_components);
  GST_LOG_OBJECT (dec, "jpeg_color_space=%d", dec->cinfo.jpeg_color_space);

  if (!dec->cinfo.num_components || !dec->cinfo.comp_info)
    goto components_not_supported;

  r_h = dec->cinfo.comp_info[0].h_samp_factor;
  r_v = dec->cinfo.comp_info[0].v_samp_factor;

  GST_LOG_OBJECT (dec, "r_h = %d, r_v = %d", r_h, r_v);

  if (dec->cinfo.num_components > 3)
    goto components_not_supported;

  /* verify color space expectation to avoid going *boom* or bogus output */
  if (dec->cinfo.jpeg_color_space != JCS_YCbCr &&
      dec->cinfo.jpeg_color_space != JCS_GRAYSCALE &&
      dec->cinfo.jpeg_color_space != JCS_RGB)
    goto unsupported_colorspace;

#ifndef GST_DISABLE_GST_DEBUG
  {
    gint i;

    for (i = 0; i < dec->cinfo.num_components; ++i) {
      GST_LOG_OBJECT (dec, "[%d] h_samp_factor=%d, v_samp_factor=%d, cid=%d",
          i, dec->cinfo.comp_info[i].h_samp_factor,
          dec->cinfo.comp_info[i].v_samp_factor,
          dec->cinfo.comp_info[i].component_id);
    }
  }
#endif

  /* prepare for raw output */
  dec->cinfo.do_fancy_upsampling = FALSE;
  dec->cinfo.do_block_smoothing = FALSE;
  dec->cinfo.out_color_space = dec->cinfo.jpeg_color_space;
  dec->cinfo.dct_method = dec->idct_method;
  dec->cinfo.raw_data_out = TRUE;

  GST_LOG_OBJECT (dec, "starting decompress");
  guarantee_huff_tables (&dec->cinfo);
  if (!jpeg_start_decompress (&dec->cinfo)) {
    GST_WARNING_OBJECT (dec, "failed to start decompression cycle");
  }

  /* sanity checks to get safe and reasonable output */
  switch (dec->cinfo.jpeg_color_space) {
    case JCS_GRAYSCALE:
      if (dec->cinfo.num_components != 1)
        goto invalid_yuvrgbgrayscale;
      break;
    case JCS_RGB:
      if (dec->cinfo.num_components != 3 || dec->cinfo.max_v_samp_factor > 1 ||
          dec->cinfo.max_h_samp_factor > 1)
        goto invalid_yuvrgbgrayscale;
      break;
    case JCS_YCbCr:
      if (dec->cinfo.num_components != 3 ||
          r_v > 2 || r_v < dec->cinfo.comp_info[0].v_samp_factor ||
          r_v < dec->cinfo.comp_info[1].v_samp_factor ||
          r_h < dec->cinfo.comp_info[0].h_samp_factor ||
          r_h < dec->cinfo.comp_info[1].h_samp_factor)
        goto invalid_yuvrgbgrayscale;
      break;
    default:
      g_assert_not_reached ();
      break;
  }

  width = dec->cinfo.output_width;
  height = dec->cinfo.output_height;

  if (G_UNLIKELY (width < MIN_WIDTH || width > MAX_WIDTH ||
          height < MIN_HEIGHT || height > MAX_HEIGHT))
    goto wrong_size;

  gst_jpeg_dec_negotiate (dec, width, height, dec->cinfo.jpeg_color_space);

  state = gst_video_decoder_get_output_state (bdec);
  ret = gst_video_decoder_alloc_output_frame (bdec, frame);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto alloc_failed;

  if (!gst_video_frame_map (&vframe, &state->info, frame->output_buffer,
          GST_MAP_READWRITE))
    goto alloc_failed;

  GST_LOG_OBJECT (dec, "width %d, height %d", width, height);

  if (dec->cinfo.jpeg_color_space == JCS_RGB) {
    gst_jpeg_dec_decode_rgb (dec, &vframe);
  } else if (dec->cinfo.jpeg_color_space == JCS_GRAYSCALE) {
    gst_jpeg_dec_decode_grayscale (dec, &vframe);
  } else {
    GST_LOG_OBJECT (dec, "decompressing (required scanline buffer height = %u)",
        dec->cinfo.rec_outbuf_height);

    /* For some widths jpeglib requires more horizontal padding than I420 
     * provides. In those cases we need to decode into separate buffers and then
     * copy over the data into our final picture buffer, otherwise jpeglib might
     * write over the end of a line into the beginning of the next line,
     * resulting in blocky artifacts on the left side of the picture. */
    if (G_UNLIKELY (width % (dec->cinfo.max_h_samp_factor * DCTSIZE) != 0
            || dec->cinfo.comp_info[0].h_samp_factor != 2
            || dec->cinfo.comp_info[1].h_samp_factor != 1
            || dec->cinfo.comp_info[2].h_samp_factor != 1)) {
      GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
          "indirect decoding using extra buffer copy");
      gst_jpeg_dec_decode_indirect (dec, &vframe, r_v, r_h,
          dec->cinfo.num_components);
    } else {
      ret = gst_jpeg_dec_decode_direct (dec, &vframe);

      if (G_UNLIKELY (ret != GST_FLOW_OK))
        goto decode_direct_failed;
    }
  }

  gst_video_frame_unmap (&vframe);

  GST_LOG_OBJECT (dec, "decompressing finished");
  jpeg_finish_decompress (&dec->cinfo);

  /* reset error count on successful decode */
  dec->error_count = 0;

  gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map);
  ret = gst_video_decoder_finish_frame (bdec, frame);
  need_unmap = FALSE;

done:

exit:

  if (G_UNLIKELY (ret == GST_FLOW_ERROR)) {
    jpeg_abort_decompress (&dec->cinfo);
    ret = gst_jpeg_dec_post_error_or_warning (dec);
  }

  if (need_unmap)
    gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map);

  if (state)
    gst_video_codec_state_unref (state);

  return ret;

  /* special cases */
need_more_data:
  {
    GST_LOG_OBJECT (dec, "we need more data");
    ret = GST_FLOW_OK;
    goto exit;
  }
  /* ERRORS */
wrong_size:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Picture is too small or too big (%ux%u)", width, height);
    ret = GST_FLOW_ERROR;
    goto done;
  }
decode_error:
  {
    gchar err_msg[JMSG_LENGTH_MAX];

    dec->jerr.pub.format_message ((j_common_ptr) (&dec->cinfo), err_msg);

    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Decode error #%u: %s", code, err_msg);

    gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map);
    gst_video_decoder_drop_frame (bdec, frame);
    need_unmap = FALSE;

    ret = GST_FLOW_ERROR;
    goto done;
  }
decode_direct_failed:
  {
    /* already posted an error message */
    jpeg_abort_decompress (&dec->cinfo);
    goto done;
  }
alloc_failed:
  {
    const gchar *reason;

    reason = gst_flow_get_name (ret);

    GST_DEBUG_OBJECT (dec, "failed to alloc buffer, reason %s", reason);
    /* Reset for next time */
    jpeg_abort_decompress (&dec->cinfo);
    if (ret != GST_FLOW_EOS && ret != GST_FLOW_FLUSHING &&
        ret != GST_FLOW_NOT_LINKED) {
      gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
          "Buffer allocation failed, reason: %s", reason);
    }
    goto exit;
  }
components_not_supported:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "number of components not supported: %d (max 3)",
        dec->cinfo.num_components);
    ret = GST_FLOW_ERROR;
    goto done;
  }
unsupported_colorspace:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Picture has unknown or unsupported colourspace");
    ret = GST_FLOW_ERROR;
    goto done;
  }
invalid_yuvrgbgrayscale:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Picture is corrupt or unhandled YUV/RGB/grayscale layout");
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
Code example #15
static GstFlowReturn
gst_rsvg_decode_image (GstRsvgDec * rsvg, GstBuffer * buffer,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (rsvg);
  GstFlowReturn ret = GST_FLOW_OK;
  cairo_t *cr;
  cairo_surface_t *surface;
  RsvgHandle *handle;
  GError *error = NULL;
  RsvgDimensionData dimension;
  gdouble scalex, scaley;
  GstMapInfo minfo;
  GstVideoFrame vframe;
  GstVideoCodecState *output_state;

  GST_LOG_OBJECT (rsvg, "parsing svg");

  if (!gst_buffer_map (buffer, &minfo, GST_MAP_READ)) {
    GST_ERROR_OBJECT (rsvg, "Failed to get SVG image");
    return GST_FLOW_ERROR;
  }
  handle = rsvg_handle_new_from_data (minfo.data, minfo.size, &error);
  if (!handle) {
    GST_ERROR_OBJECT (rsvg, "Failed to parse SVG image: %s", error->message);
    g_error_free (error);
    gst_buffer_unmap (buffer, &minfo);
    return GST_FLOW_ERROR;
  }

  rsvg_handle_get_dimensions (handle, &dimension);

  output_state = gst_video_decoder_get_output_state (decoder);
  if ((output_state == NULL)
      || GST_VIDEO_INFO_WIDTH (&output_state->info) != dimension.width
      || GST_VIDEO_INFO_HEIGHT (&output_state->info) != dimension.height) {

    /* Create the output state */
    if (output_state)
      gst_video_codec_state_unref (output_state);
    output_state =
        gst_video_decoder_set_output_state (decoder, GST_RSVG_VIDEO_FORMAT,
        dimension.width, dimension.height, rsvg->input_state);
  }

  ret = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (ret != GST_FLOW_OK) {
    g_object_unref (handle);
    gst_video_codec_state_unref (output_state);
    gst_buffer_unmap (buffer, &minfo);
    GST_ERROR_OBJECT (rsvg, "Buffer allocation failed %s",
        gst_flow_get_name (ret));
    return ret;
  }

  GST_LOG_OBJECT (rsvg, "render image at %d x %d",
      GST_VIDEO_INFO_HEIGHT (&output_state->info),
      GST_VIDEO_INFO_WIDTH (&output_state->info));


  if (!gst_video_frame_map (&vframe,
          &output_state->info, frame->output_buffer, GST_MAP_READWRITE)) {
    GST_ERROR_OBJECT (rsvg, "Failed to map output frame");
    g_object_unref (handle);
    gst_video_codec_state_unref (output_state);
    gst_buffer_unmap (buffer, &minfo);
    return GST_FLOW_ERROR;
  }
  surface =
      cairo_image_surface_create_for_data (GST_VIDEO_FRAME_PLANE_DATA (&vframe,
          0), CAIRO_FORMAT_ARGB32, GST_VIDEO_FRAME_WIDTH (&vframe),
      GST_VIDEO_FRAME_HEIGHT (&vframe), GST_VIDEO_FRAME_PLANE_STRIDE (&vframe,
          0));

  cr = cairo_create (surface);
  cairo_set_operator (cr, CAIRO_OPERATOR_CLEAR);
  cairo_set_source_rgba (cr, 1.0, 1.0, 1.0, 0.0);
  cairo_paint (cr);
  cairo_set_operator (cr, CAIRO_OPERATOR_OVER);
  cairo_set_source_rgba (cr, 0.0, 0.0, 0.0, 1.0);

  scalex = scaley = 1.0;
  if (GST_VIDEO_INFO_WIDTH (&output_state->info) != dimension.width) {
    scalex =
        ((gdouble) GST_VIDEO_INFO_WIDTH (&output_state->info)) /
        ((gdouble) dimension.width);
  }
  if (GST_VIDEO_INFO_HEIGHT (&output_state->info) != dimension.height) {
    scaley =
        ((gdouble) GST_VIDEO_INFO_HEIGHT (&output_state->info)) /
        ((gdouble) dimension.height);
  }
  cairo_scale (cr, scalex, scaley);
  rsvg_handle_render_cairo (handle, cr);

  g_object_unref (handle);
  cairo_destroy (cr);
  cairo_surface_destroy (surface);

  /* Now unpremultiply Cairo's ARGB to match GStreamer's */
  gst_rsvg_decode_unpremultiply (GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0),
      GST_VIDEO_FRAME_WIDTH (&vframe), GST_VIDEO_FRAME_HEIGHT (&vframe));

  gst_video_codec_state_unref (output_state);
  gst_buffer_unmap (buffer, &minfo);
  gst_video_frame_unmap (&vframe);

  return ret;
}
Code example #16
File: gstmpeg2dec.c Project: PeterXu/gst-mobile
static GstFlowReturn
handle_slice (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecFrame *frame;
  const mpeg2_picture_t *picture;
  gboolean key_frame = FALSE;
  GstVideoCodecState *state;

  GST_DEBUG_OBJECT (mpeg2dec,
      "fbuf:%p display_picture:%p current_picture:%p fbuf->id:%d",
      info->display_fbuf, info->display_picture, info->current_picture,
      GPOINTER_TO_INT (info->display_fbuf->id) - 1);

  /* Note: the fbuf id is shifted by 1 to distinguish between
   * NULL values (used by dummy buffers) and 'real' values */
  frame = gst_video_decoder_get_frame (GST_VIDEO_DECODER (mpeg2dec),
      GPOINTER_TO_INT (info->display_fbuf->id) - 1);
  if (!frame)
    goto no_frame;
  picture = info->display_picture;
  key_frame = (picture->flags & PIC_MASK_CODING_TYPE) == PIC_FLAG_CODING_TYPE_I;

  GST_DEBUG_OBJECT (mpeg2dec, "picture flags: %d, type: %d, keyframe: %d",
      picture->flags, picture->flags & PIC_MASK_CODING_TYPE, key_frame);

  if (key_frame) {
    mpeg2_skip (mpeg2dec->decoder, 0);
  }

  if (mpeg2dec->discont_state == MPEG2DEC_DISC_NEW_KEYFRAME && key_frame)
    mpeg2dec->discont_state = MPEG2DEC_DISC_NONE;

  if (picture->flags & PIC_FLAG_SKIP) {
    GST_DEBUG_OBJECT (mpeg2dec, "dropping buffer because of skip flag");
    ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (mpeg2dec), frame);
    mpeg2_skip (mpeg2dec->decoder, 1);
    return ret;
  }

  if (mpeg2dec->discont_state != MPEG2DEC_DISC_NONE) {
    GST_DEBUG_OBJECT (mpeg2dec, "dropping buffer, discont state %d",
        mpeg2dec->discont_state);
    ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (mpeg2dec), frame);
    return ret;
  }

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (mpeg2dec));

  /* do cropping if the target region is smaller than the input one */
  if (mpeg2dec->need_cropping && !mpeg2dec->has_cropping) {
    GstVideoFrame *vframe;

    if (gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (mpeg2dec),
            frame) < 0) {
      GST_DEBUG_OBJECT (mpeg2dec, "dropping buffer crop, too late");
      ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (mpeg2dec), frame);
      goto beach;
    }

    GST_DEBUG_OBJECT (mpeg2dec, "cropping buffer");
    vframe = gst_mpeg2dec_get_buffer (mpeg2dec, frame->system_frame_number);
    g_assert (vframe != NULL);
    ret = gst_mpeg2dec_crop_buffer (mpeg2dec, frame, vframe);
  }

  ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (mpeg2dec), frame);

beach:
  gst_video_codec_state_unref (state);
  return ret;

no_frame:
  {
    GST_DEBUG ("display buffer does not have a valid frame");
    return GST_FLOW_OK;
  }
}
Code example #17
static GstFlowReturn gst_openh264dec_handle_frame(GstVideoDecoder *decoder, GstVideoCodecFrame *frame)
{
    GstOpenh264Dec *openh264dec = GST_OPENH264DEC(decoder);
    GstMapInfo map_info;
    GstVideoCodecState *state;
    SBufferInfo dst_buf_info;
    DECODING_STATE ret;
    guint8 *yuvdata[3];
    GstFlowReturn flow_status;
    GstVideoFrame video_frame;
    guint actual_width, actual_height;
    guint i;
    guint8 *p;
    guint row_stride, component_width, component_height, src_width, row;

    if (frame) {
        if (!gst_buffer_map(frame->input_buffer, &map_info, GST_MAP_READ)) {
            GST_ERROR_OBJECT(openh264dec, "Cannot map input buffer!");
            return GST_FLOW_ERROR;
        }

        GST_LOG_OBJECT(openh264dec, "handle frame, %d", map_info.size > 4 ? map_info.data[4] & 0x1f : -1);

        memset (&dst_buf_info, 0, sizeof (SBufferInfo));
        ret = openh264dec->priv->decoder->DecodeFrame2(map_info.data, map_info.size, yuvdata, &dst_buf_info);

        if (ret == dsNoParamSets) {
            GST_DEBUG_OBJECT(openh264dec, "Requesting a key unit");
            gst_pad_push_event(GST_VIDEO_DECODER_SINK_PAD(decoder),
                gst_video_event_new_upstream_force_key_unit(GST_CLOCK_TIME_NONE, FALSE, 0));
        }

        if (ret != dsErrorFree && ret != dsNoParamSets) {
            GST_DEBUG_OBJECT(openh264dec, "Requesting a key unit");
            gst_pad_push_event(GST_VIDEO_DECODER_SINK_PAD(decoder),
                               gst_video_event_new_upstream_force_key_unit(GST_CLOCK_TIME_NONE, FALSE, 0));
            GST_LOG_OBJECT(openh264dec, "error decoding nal, return code: %d", ret);
        }

        gst_buffer_unmap(frame->input_buffer, &map_info);
        gst_video_codec_frame_unref (frame);
        frame = NULL;
    } else {
        memset (&dst_buf_info, 0, sizeof (SBufferInfo));
        ret = openh264dec->priv->decoder->DecodeFrame2(NULL, 0, yuvdata, &dst_buf_info);
        if (ret != dsErrorFree)
            return GST_FLOW_EOS;
    }

    /* FIXME: openh264 has no way for us to get a connection
     * between the input and output frames, we just have to
     * guess based on the input. Fortunately openh264 can
     * only do baseline profile. */
    frame = gst_video_decoder_get_oldest_frame (decoder);
    if (!frame) {
      /* Can only happen in finish() */
      return GST_FLOW_EOS;
    }

    /* No output available yet; release our frame reference */
    if (dst_buf_info.iBufferStatus != 1) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_OK;
    }

    actual_width  = dst_buf_info.UsrData.sSystemBuffer.iWidth;
    actual_height = dst_buf_info.UsrData.sSystemBuffer.iHeight;

    if (!gst_pad_has_current_caps (GST_VIDEO_DECODER_SRC_PAD (openh264dec)) || actual_width != openh264dec->priv->width || actual_height != openh264dec->priv->height) {
        state = gst_video_decoder_set_output_state(decoder,
            GST_VIDEO_FORMAT_I420,
            actual_width,
            actual_height,
            openh264dec->priv->input_state);
        openh264dec->priv->width = actual_width;
        openh264dec->priv->height = actual_height;

        if (!gst_video_decoder_negotiate(decoder)) {
            GST_ERROR_OBJECT(openh264dec, "Failed to negotiate with downstream elements");
            return GST_FLOW_NOT_NEGOTIATED;
        }
    } else {
        state = gst_video_decoder_get_output_state(decoder);
    }

    flow_status = gst_video_decoder_allocate_output_frame(decoder, frame);
    if (flow_status != GST_FLOW_OK) {
        gst_video_codec_state_unref (state);
        return flow_status;
    }

    if (!gst_video_frame_map(&video_frame, &state->info, frame->output_buffer, GST_MAP_WRITE)) {
        GST_ERROR_OBJECT(openh264dec, "Cannot map output buffer!");
        gst_video_codec_state_unref (state);
        return GST_FLOW_ERROR;
    }

    for (i = 0; i < 3; i++) {
        p = GST_VIDEO_FRAME_COMP_DATA(&video_frame, i);
        row_stride = GST_VIDEO_FRAME_COMP_STRIDE(&video_frame, i);
        component_width = GST_VIDEO_FRAME_COMP_WIDTH(&video_frame, i);
        component_height = GST_VIDEO_FRAME_COMP_HEIGHT(&video_frame, i);
        src_width = i < 1 ? dst_buf_info.UsrData.sSystemBuffer.iStride[0] : dst_buf_info.UsrData.sSystemBuffer.iStride[1];
        for (row = 0; row < component_height; row++) {
            memcpy(p, yuvdata[i], component_width);
            p += row_stride;
            yuvdata[i] += src_width;
        }
    }
    gst_video_codec_state_unref (state);
    gst_video_frame_unmap(&video_frame);

    return gst_video_decoder_finish_frame(decoder, frame);
}
Code example #18
File: gstmfcdec.c Project: PeterXu/gst-mobile
static GstFlowReturn
gst_mfc_dec_dequeue_output (GstMFCDec * self)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint mfc_ret;
  GstVideoCodecFrame *frame = NULL;
  GstBuffer *outbuf = NULL;
  struct mfc_buffer *mfc_outbuf = NULL;
  gint width, height;
  gint crop_left, crop_top, crop_width, crop_height;
  gint src_ystride, src_uvstride;
  GstVideoCodecState *state = NULL;
  gint64 deadline;
  struct timeval timestamp;

  if (!self->initialized) {
    GST_DEBUG_OBJECT (self, "Initializing decoder");
    if ((mfc_ret = mfc_dec_init_output (self->context, 1)) < 0)
      goto initialize_error;
    self->initialized = TRUE;
  }

  while ((mfc_ret = mfc_dec_output_available (self->context)) > 0) {
    GST_DEBUG_OBJECT (self, "Dequeueing output");

    mfc_dec_get_output_size (self->context, &width, &height);
    mfc_dec_get_output_stride (self->context, &src_ystride, &src_uvstride);
    mfc_dec_get_crop_size (self->context, &crop_left, &crop_top, &crop_width,
        &crop_height);

    GST_DEBUG_OBJECT (self, "Have output: width %d, height %d, "
        "Y stride %d, UV stride %d, "
        "crop_left %d, crop_top %d, "
        "crop_width %d, crop_height %d", width, height, src_ystride,
        src_uvstride, crop_left, crop_top, crop_width, crop_height);

    state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));

    if (!state || self->width != width || self->height != height ||
        self->src_stride[0] != src_ystride
        || self->src_stride[1] != src_uvstride
        || self->crop_left != crop_left || self->crop_top != crop_top
        || self->crop_width != crop_width || self->crop_height != crop_height) {
      self->width = width;
      self->height = height;
      self->crop_left = crop_left;
      self->crop_top = crop_top;
      self->crop_width = crop_width;
      self->crop_height = crop_height;
      self->src_stride[0] = src_ystride;
      self->src_stride[1] = src_uvstride;
      self->src_stride[2] = 0;

      if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self)))
        goto negotiate_error;

      if (state)
        gst_video_codec_state_unref (state);
      state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));
    }

    if ((mfc_ret =
            mfc_dec_dequeue_output (self->context, &mfc_outbuf,
                &timestamp)) < 0) {
      if (mfc_ret == -2) {
        GST_DEBUG_OBJECT (self, "Timeout dequeueing output, trying again");
        mfc_ret =
            mfc_dec_dequeue_output (self->context, &mfc_outbuf, &timestamp);
      }

      if (mfc_ret < 0)
        goto dequeue_error;
    }

    g_assert (mfc_outbuf != NULL);

    GST_DEBUG_OBJECT (self, "Got output buffer with ID %ld", timestamp.tv_sec);

    frame = NULL;
    if (timestamp.tv_sec != -1)
      frame =
          gst_video_decoder_get_frame (GST_VIDEO_DECODER (self),
          timestamp.tv_sec);

    if (frame) {
      deadline =
          gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (self),
          frame);
      if (deadline < 0) {
        GST_LOG_OBJECT (self,
            "Dropping too late frame: deadline %" G_GINT64_FORMAT, deadline);
        ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
        frame = NULL;
        outbuf = NULL;
        goto done;
      }

      ret =
          gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (self),
          frame);

      if (ret != GST_FLOW_OK)
        goto alloc_error;

      outbuf = frame->output_buffer;
    } else {
      GST_WARNING_OBJECT (self, "Didn't find a frame for ID %ld",
          timestamp.tv_sec);

      outbuf =
          gst_video_decoder_allocate_output_buffer (GST_VIDEO_DECODER (self));

      if (!outbuf) {
        ret = GST_FLOW_ERROR;
        goto alloc_error;
      }
    }

    ret = gst_mfc_dec_fill_outbuf (self, outbuf, mfc_outbuf, state);
    if (ret != GST_FLOW_OK)
      goto fill_error;

    if (frame) {
      ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
      frame = NULL;
      outbuf = NULL;
    } else {
      ret = gst_pad_push (GST_VIDEO_DECODER_SRC_PAD (self), outbuf);
      outbuf = NULL;
    }

    if (ret != GST_FLOW_OK)
      GST_INFO_OBJECT (self, "Pushing frame returned: %s",
          gst_flow_get_name (ret));

  done:
    if (mfc_outbuf) {
      if ((mfc_ret = mfc_dec_enqueue_output (self->context, mfc_outbuf)) < 0)
        goto enqueue_error;
    }

    if (!frame && outbuf)
      gst_buffer_unref (outbuf);
    if (frame)
      gst_video_codec_frame_unref (frame);
    if (state)
      gst_video_codec_state_unref (state);

    frame = NULL;
    outbuf = NULL;

    if (ret != GST_FLOW_OK)
      break;
  }

  return ret;

initialize_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, INIT, ("Failed to initialize output"),
        ("mfc_dec_init: %d", mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }

negotiate_error:
  {
    GST_ELEMENT_ERROR (self, CORE, NEGOTIATION, ("Failed to negotiate"),
        (NULL));
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }

dequeue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to dequeue output buffer"), ("mfc_dec_dequeue_output: %d",
            mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }

alloc_error:
  {
    GST_ELEMENT_ERROR (self, CORE, FAILED, ("Failed to allocate output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

fill_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, ("Failed to fill output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

enqueue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to enqueue output buffer"), ("mfc_dec_enqueue_output: %d",
            mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
Code example #19
File: vtdec.c Project: luisbg/gst-plugins-bad
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  gboolean output_textures = FALSE;

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently configured
     * caps are still accepted by downstream we keep them so we don't have to
     * destroy and recreate the session.
     */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available
     */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
Code example #20
File: gstmpeg2dec.c Project: PeterXu/gst-mobile
static gboolean
gst_mpeg2dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstMpeg2dec *dec = GST_MPEG2DEC (decoder);
  GstVideoCodecState *state;
  GstBufferPool *pool;
  guint size, min, max;
  GstStructure *config;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean update_allocator;

  /* Set allocation parameters to guarantee 16-byte aligned output buffers */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  params.align = MAX (params.align, 15);

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);
  if (allocator)
    gst_object_unref (allocator);

  /* Now chain up to the parent class to guarantee that we can
   * get a buffer pool from the query */
  if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
          query))
    return FALSE;

  state = gst_video_decoder_get_output_state (decoder);

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);

  dec->has_cropping = FALSE;
  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
    dec->has_cropping =
        gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE,
        NULL);
  }

  if (dec->has_cropping) {
    GstCaps *caps;

    /* Calculate uncropped size */
    size = MAX (size, dec->decoded_info.size);
    caps = gst_video_info_to_caps (&dec->decoded_info);
    gst_buffer_pool_config_set_params (config, caps, size, min, max);
    gst_caps_unref (caps);
  }

  gst_buffer_pool_set_config (pool, config);

  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);

  gst_object_unref (pool);
  gst_video_codec_state_unref (state);

  return TRUE;
}
Code example #21
File: gstmpeg2dec.c Project: PeterXu/gst-mobile
static GstFlowReturn
gst_mpeg2dec_crop_buffer (GstMpeg2dec * dec, GstVideoCodecFrame * in_frame,
    GstVideoFrame * input_vframe)
{
  GstVideoCodecState *state;
  GstVideoInfo *info;
  GstVideoInfo *dinfo;
  guint c, n_planes;
  GstVideoFrame output_frame;
  GstFlowReturn ret;

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  info = &state->info;
  dinfo = &dec->decoded_info;

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
      "Copying input buffer %ux%u (%" G_GSIZE_FORMAT ") to output buffer "
      "%ux%u (%" G_GSIZE_FORMAT ")", dinfo->width, dinfo->height,
      dinfo->size, info->width, info->height, info->size);

  ret =
      gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (dec),
      in_frame);
  if (ret != GST_FLOW_OK)
    goto beach;

  if (!gst_video_frame_map (&output_frame, info, in_frame->output_buffer,
          GST_MAP_WRITE))
    goto map_fail;

  n_planes = GST_VIDEO_FRAME_N_PLANES (&output_frame);
  for (c = 0; c < n_planes; c++) {
    guint w, h, j;
    guint8 *sp, *dp;
    gint ss, ds;

    sp = GST_VIDEO_FRAME_PLANE_DATA (input_vframe, c);
    dp = GST_VIDEO_FRAME_PLANE_DATA (&output_frame, c);

    ss = GST_VIDEO_FRAME_PLANE_STRIDE (input_vframe, c);
    ds = GST_VIDEO_FRAME_PLANE_STRIDE (&output_frame, c);

    w = MIN (ABS (ss), ABS (ds));
    h = GST_VIDEO_FRAME_COMP_HEIGHT (&output_frame, c);

    GST_CAT_DEBUG (GST_CAT_PERFORMANCE, "copy plane %u, w:%u h:%u ", c, w, h);

    for (j = 0; j < h; j++) {
      memcpy (dp, sp, w);
      dp += ds;
      sp += ss;
    }
  }

  gst_video_frame_unmap (&output_frame);

  GST_BUFFER_FLAGS (in_frame->output_buffer) =
      GST_BUFFER_FLAGS (input_vframe->buffer);

beach:
  gst_video_codec_state_unref (state);

  return ret;

map_fail:
  {
    GST_ERROR_OBJECT (dec, "Failed to map output frame");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
Code example #22
File: vtdec.c Project: shakin/gst-plugins-bad
static gboolean
gst_vtdec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  gboolean ret;
  GstCaps *caps;
  GstCapsFeatures *features;
  GstVtdec *vtdec = GST_VTDEC (decoder);

  ret =
      GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->decide_allocation
      (decoder, query);
  if (!ret)
    goto out;

  gst_query_parse_allocation (query, &caps, NULL);
  if (caps) {
    GstGLContext *gl_context = NULL;
    features = gst_caps_get_features (caps, 0);

    if (gst_caps_features_contains (features,
            GST_CAPS_FEATURE_MEMORY_GL_MEMORY)) {
      GstContext *context = NULL;
      GstQuery *query = gst_query_new_context ("gst.gl.local_context");
      if (gst_pad_peer_query (GST_VIDEO_DECODER_SRC_PAD (decoder), query)) {

        gst_query_parse_context (query, &context);
        if (context) {
          const GstStructure *s = gst_context_get_structure (context);
          gst_structure_get (s, "context", GST_GL_TYPE_CONTEXT, &gl_context,
              NULL);
        }
      }
      gst_query_unref (query);

      if (gl_context) {
        GstVideoFormat internal_format;
        GstVideoCodecState *output_state =
            gst_video_decoder_get_output_state (decoder);

        GST_INFO_OBJECT (decoder, "pushing textures. GL context %p", context);
        if (vtdec->texture_cache)
          gst_core_video_texture_cache_free (vtdec->texture_cache);

#ifdef HAVE_IOS
        internal_format = GST_VIDEO_FORMAT_NV12;
#else
        internal_format = GST_VIDEO_FORMAT_UYVY;
#endif
        vtdec->texture_cache = gst_core_video_texture_cache_new (gl_context);
        gst_core_video_texture_cache_set_format (vtdec->texture_cache,
            internal_format, output_state->caps);
        gst_video_codec_state_unref (output_state);
        gst_object_unref (gl_context);
      } else {
        GST_WARNING_OBJECT (decoder,
            "got memory:GLMemory caps but not GL context from downstream element");
      }
    }
  }

out:
  return ret;
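Common to all of the examples above: gst_video_decoder_get_output_state () returns a new reference to the decoder's current output state, or NULL if no output state has been configured yet, and every successful call must be balanced with gst_video_codec_state_unref (). A minimal sketch of that acquire/check/release pattern (the GstMyDec naming and the helper itself are hypothetical, for illustration only):

/* Hypothetical helper showing the pattern shared by the examples above:
 * take the state reference, read what is needed, then unref it. */
static gboolean
my_dec_get_output_size (GstVideoDecoder * decoder, guint * width,
    guint * height)
{
  GstVideoCodecState *state;

  /* New reference, or NULL before the first set_output_state () */
  state = gst_video_decoder_get_output_state (decoder);
  if (state == NULL)
    return FALSE;

  *width = GST_VIDEO_INFO_WIDTH (&state->info);
  *height = GST_VIDEO_INFO_HEIGHT (&state->info);

  /* Balance the reference taken by _get_output_state () */
  gst_video_codec_state_unref (state);

  return TRUE;
}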
}