Example #1
static gboolean
theora_enc_stop (GstVideoEncoder * benc)
{
  GstTheoraEnc *enc;

  GST_DEBUG_OBJECT (benc, "stop: clearing theora state");
  enc = GST_THEORA_ENC (benc);

  if (enc->encoder)
    th_encode_free (enc->encoder);
  enc->encoder = NULL;
  th_comment_clear (&enc->comment);
  th_info_clear (&enc->info);

  if (enc->input_state)
    gst_video_codec_state_unref (enc->input_state);
  enc->input_state = NULL;

  /* Everything else is handled in reset() */
  theora_enc_clear_multipass_cache (enc);

  return TRUE;
}
Example #2
static gboolean
gst_openh264enc_stop (GstVideoEncoder * encoder)
{
  GstOpenh264Enc *openh264enc;

  openh264enc = GST_OPENH264ENC (encoder);

  if (openh264enc->encoder != NULL) {
    openh264enc->encoder->Uninitialize ();
    WelsDestroySVCEncoder (openh264enc->encoder);
    openh264enc->encoder = NULL;
  }

  if (openh264enc->input_state) {
    gst_video_codec_state_unref (openh264enc->input_state);
  }
  openh264enc->input_state = NULL;

  GST_DEBUG_OBJECT (openh264enc, "openh264_enc_stop called");

  return TRUE;
}
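
Note: both stop() vfuncs above follow the same teardown idiom: free the codec handle, drop the cached input state, and leave every pointer NULL so a repeated stop() or a later start() stays safe. A minimal sketch of just that idiom; GstMyEnc, its fields, and my_codec_free() are hypothetical stand-ins:

#include <gst/video/gstvideoencoder.h>

static gboolean
gst_my_enc_stop (GstVideoEncoder * encoder)
{
  GstMyEnc *self = GST_MY_ENC (encoder);        /* hypothetical element type */

  /* free the codec handle and NULL the pointer in one step */
  g_clear_pointer (&self->codec, my_codec_free);        /* hypothetical destructor */

  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = NULL;

  return TRUE;
}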
Example #3
static gboolean gst_aml_vdec_set_format(GstVideoDecoder *dec, GstVideoCodecState *state)
{
	gboolean ret = FALSE;
	GstStructure *structure;
	const char *name;
	GstVideoInfo *info;
	gint par_num, par_den;
	GstVideoFormat fmt;
	GstAmlVdec *amlvdec = GST_AMLVDEC(dec);

	g_return_val_if_fail(state != NULL, FALSE);
	if (amlvdec->input_state)
		gst_video_codec_state_unref(amlvdec->input_state);
	amlvdec->input_state = gst_video_codec_state_ref(state);

	structure = gst_caps_get_structure(state->caps, 0);
	name = gst_structure_get_name(structure);
	GST_INFO_OBJECT(amlvdec, "%s = %s", __FUNCTION__, name);
	if (name) {
		ret = gst_set_vstream_info(amlvdec, state->caps);
		if (!amlvdec->output_state) {
			info = &amlvdec->input_state->info;
			fmt = GST_VIDEO_FORMAT_xRGB;
			GST_VIDEO_INFO_WIDTH (info) = amlvdec->pcodec->am_sysinfo.width;
			GST_VIDEO_INFO_HEIGHT (info) = amlvdec->pcodec->am_sysinfo.height;
			par_num = GST_VIDEO_INFO_PAR_N(info);
			par_den = GST_VIDEO_INFO_PAR_D(info);
			amlvdec->output_state = gst_video_decoder_set_output_state(GST_VIDEO_DECODER(amlvdec),
					fmt, info->width,
					info->height,
					amlvdec->input_state);
			gst_video_decoder_negotiate (GST_VIDEO_DECODER (amlvdec));
		}

	}
	return ret;
}
Example #4
static gboolean
gst_msdkdec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstMsdkDec *thiz = GST_MSDKDEC (decoder);

  if (thiz->input_state) {
    /* mark for re-negotiation if display resolution changes */
    if ((GST_VIDEO_INFO_WIDTH (&thiz->input_state->info) !=
            GST_VIDEO_INFO_WIDTH (&state->info)) ||
        GST_VIDEO_INFO_HEIGHT (&thiz->input_state->info) !=
        GST_VIDEO_INFO_HEIGHT (&state->info))
      thiz->do_renego = TRUE;
    gst_video_codec_state_unref (thiz->input_state);
  }
  thiz->input_state = gst_video_codec_state_ref (state);

  /* we don't set output state here to avoid caching of mismatched
   * video information if there is dynamic resolution change in the stream.
   * All negotiation code is consolidated in gst_msdkdec_negotiate() and
   * this will be invoked from handle_frame() */

  gst_msdkdec_set_latency (thiz);
  return TRUE;
}
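
Note: the input-state bookkeeping above (and in most set_format() examples in this list) reduces to one idiom: unref the previously cached state, then take a new reference on the state handed in by the base class. A small helper sketch of that idiom:

#include <gst/video/video.h>

static void
replace_input_state (GstVideoCodecState ** slot, GstVideoCodecState * state)
{
  if (*slot)
    gst_video_codec_state_unref (*slot);
  *slot = gst_video_codec_state_ref (state);
}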
Example #5
static gboolean
gst_vaapiencode_set_format (GstVideoEncoder * venc, GstVideoCodecState * state)
{
  GstVaapiEncode *const encode = GST_VAAPIENCODE_CAST (venc);

  g_return_val_if_fail (state->caps != NULL, FALSE);

  if (!ensure_encoder (encode))
    return FALSE;
  if (!set_codec_state (encode, state))
    return FALSE;

  if (!gst_vaapi_plugin_base_set_caps (GST_VAAPI_PLUGIN_BASE (encode),
          state->caps, NULL))
    return FALSE;

  if (encode->input_state)
    gst_video_codec_state_unref (encode->input_state);
  encode->input_state = gst_video_codec_state_ref (state);
  encode->input_state_changed = TRUE;

  return gst_pad_start_task (GST_VAAPI_PLUGIN_BASE_SRC_PAD (encode),
      (GstTaskFunction) gst_vaapiencode_buffer_loop, encode, NULL);
}
Example #6
File: base_enc.c, Project: Radbug/gst-fsl
static gboolean gst_fsl_vpu_base_enc_set_format(GstVideoEncoder *encoder, GstVideoCodecState *state)
{
	VpuEncRetCode ret;
	GstVideoCodecState *output_state;
	GstFslVpuBaseEncClass *klass;
	GstFslVpuBaseEnc *vpu_base_enc;
	
	vpu_base_enc = GST_FSL_VPU_BASE_ENC(encoder);
	klass = GST_FSL_VPU_BASE_ENC_CLASS(G_OBJECT_GET_CLASS(vpu_base_enc));

	g_assert(klass->set_open_params != NULL);
	g_assert(klass->get_output_caps != NULL);

	/* Close old encoder instance */
	gst_fsl_vpu_base_enc_close_encoder(vpu_base_enc);

	/* Clean up existing framebuffers structure;
	 * if some previous and still existing buffer pools depend on this framebuffers
	 * structure, they will extend its lifetime, since they ref'd it
	 */
	if (vpu_base_enc->framebuffers != NULL)
	{
		gst_object_unref(vpu_base_enc->framebuffers);
		vpu_base_enc->framebuffers = NULL;
	}

	if (vpu_base_enc->output_phys_buffer != NULL)
	{
		gst_allocator_free(gst_fsl_vpu_enc_allocator_obtain(), (GstMemory *)(vpu_base_enc->output_phys_buffer));
		vpu_base_enc->output_phys_buffer = NULL;
	}

	memset(&(vpu_base_enc->open_param), 0, sizeof(VpuEncOpenParam));

	/* These params are usually not set by derived classes */
	vpu_base_enc->open_param.nPicWidth = GST_VIDEO_INFO_WIDTH(&(state->info));
	vpu_base_enc->open_param.nPicHeight = GST_VIDEO_INFO_HEIGHT(&(state->info));
	vpu_base_enc->open_param.nFrameRate = (GST_VIDEO_INFO_FPS_N(&(state->info)) & 0xffffUL) | (((GST_VIDEO_INFO_FPS_D(&(state->info)) - 1) & 0xffffUL) << 16);
	vpu_base_enc->open_param.sMirror = VPU_ENC_MIRDIR_NONE; /* don't use VPU rotation (IPU has better performance) */
	vpu_base_enc->open_param.nBitRate = vpu_base_enc->bitrate;
	vpu_base_enc->open_param.nGOPSize = vpu_base_enc->gop_size;
	vpu_base_enc->open_param.nUserGamma = (int)(32768 * vpu_base_enc->qp_smoothing);
	vpu_base_enc->open_param.nAvcIntra16x16OnlyModeEnable = vpu_base_enc->intra_16x16_only ? 1 : 0;
	vpu_base_enc->open_param.nRcIntervalMode = 1;
	/* These params are defaults, and are often overwritten by derived classes */
	vpu_base_enc->open_param.nUserQpMax = -1;
	vpu_base_enc->open_param.nUserQpMin = -1;
	vpu_base_enc->open_param.nRcIntraQp = -1;

	GST_DEBUG_OBJECT(vpu_base_enc, "setting bitrate to %u kbps and GOP size to %u", vpu_base_enc->open_param.nBitRate, vpu_base_enc->open_param.nGOPSize);

	/* Give the derived class a chance to set params */
	if (!klass->set_open_params(vpu_base_enc, &(vpu_base_enc->open_param)))
	{
		GST_ERROR_OBJECT(vpu_base_enc, "derived class could not set open params");
		return FALSE;
	}

	/* The actual initialization; requires bitstream information (such as the codec type), which
	 * is determined by the fill_param_set call before */
	ret = VPU_EncOpen(&(vpu_base_enc->handle), &(vpu_base_enc->mem_info), &(vpu_base_enc->open_param));
	if (ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "opening new VPU handle failed: %s", gst_fsl_vpu_strerror(ret));
		return FALSE;
	}

	vpu_base_enc->vpu_inst_opened = TRUE;

	/* configure AFTER setting vpu_inst_opened to TRUE, to make sure that in case of
	   config failure the VPU handle is closed in the finalizer */

	ret = VPU_EncConfig(vpu_base_enc->handle, VPU_ENC_CONF_NONE, NULL);
	if (ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "could not apply default configuration: %s", gst_fsl_vpu_strerror(ret));
		return FALSE;
	}

	ret = VPU_EncGetInitialInfo(vpu_base_enc->handle, &(vpu_base_enc->init_info));
	if (ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "retrieving init info failed: %s", gst_fsl_vpu_strerror(ret));
		return FALSE;
	}

	/* Framebuffers are created in handle_frame(), to make sure the actual stride is used */

	/* Set the output state, using caps defined by the derived class */
	output_state = gst_video_encoder_set_output_state(
		encoder,
		klass->get_output_caps(vpu_base_enc),
		state
	);
	gst_video_codec_state_unref(output_state);

	vpu_base_enc->video_info = state->info;

	return TRUE;
}
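
Note: the nFrameRate assignment above packs the GStreamer framerate into a single 32-bit VPU field: the numerator in the low 16 bits, the denominator minus one in the high 16 bits. The computation in isolation, as a sketch:

#include <gst/video/video.h>

static guint32
pack_vpu_frame_rate (const GstVideoInfo * info)
{
  guint32 fps_n = GST_VIDEO_INFO_FPS_N (info) & 0xffffUL;
  guint32 fps_d = (GST_VIDEO_INFO_FPS_D (info) - 1) & 0xffffUL;

  return fps_n | (fps_d << 16);
}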
Example #7
static GstFlowReturn
gst_jpeg_dec_handle_frame (GstVideoDecoder * bdec, GstVideoCodecFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstJpegDec *dec = (GstJpegDec *) bdec;
  GstVideoFrame vframe;
  gint width, height;
  gint r_h, r_v;
  guint code, hdr_ok;
  gboolean need_unmap = TRUE;
  GstVideoCodecState *state = NULL;

  dec->current_frame = frame;
  gst_buffer_map (frame->input_buffer, &dec->current_frame_map, GST_MAP_READ);
  gst_jpeg_dec_fill_input_buffer (&dec->cinfo);

  if (setjmp (dec->jerr.setjmp_buffer)) {
    code = dec->jerr.pub.msg_code;

    if (code == JERR_INPUT_EOF) {
      GST_DEBUG ("jpeg input EOF error, we probably need more data");
      goto need_more_data;
    }
    goto decode_error;
  }

  /* read header */
  hdr_ok = jpeg_read_header (&dec->cinfo, TRUE);
  if (G_UNLIKELY (hdr_ok != JPEG_HEADER_OK)) {
    GST_WARNING_OBJECT (dec, "reading the header failed, %d", hdr_ok);
  }

  GST_LOG_OBJECT (dec, "num_components=%d", dec->cinfo.num_components);
  GST_LOG_OBJECT (dec, "jpeg_color_space=%d", dec->cinfo.jpeg_color_space);

  if (!dec->cinfo.num_components || !dec->cinfo.comp_info)
    goto components_not_supported;

  r_h = dec->cinfo.comp_info[0].h_samp_factor;
  r_v = dec->cinfo.comp_info[0].v_samp_factor;

  GST_LOG_OBJECT (dec, "r_h = %d, r_v = %d", r_h, r_v);

  if (dec->cinfo.num_components > 3)
    goto components_not_supported;

  /* verify color space expectation to avoid going *boom* or bogus output */
  if (dec->cinfo.jpeg_color_space != JCS_YCbCr &&
      dec->cinfo.jpeg_color_space != JCS_GRAYSCALE &&
      dec->cinfo.jpeg_color_space != JCS_RGB)
    goto unsupported_colorspace;

#ifndef GST_DISABLE_GST_DEBUG
  {
    gint i;

    for (i = 0; i < dec->cinfo.num_components; ++i) {
      GST_LOG_OBJECT (dec, "[%d] h_samp_factor=%d, v_samp_factor=%d, cid=%d",
          i, dec->cinfo.comp_info[i].h_samp_factor,
          dec->cinfo.comp_info[i].v_samp_factor,
          dec->cinfo.comp_info[i].component_id);
    }
  }
#endif

  /* prepare for raw output */
  dec->cinfo.do_fancy_upsampling = FALSE;
  dec->cinfo.do_block_smoothing = FALSE;
  dec->cinfo.out_color_space = dec->cinfo.jpeg_color_space;
  dec->cinfo.dct_method = dec->idct_method;
  dec->cinfo.raw_data_out = TRUE;

  GST_LOG_OBJECT (dec, "starting decompress");
  guarantee_huff_tables (&dec->cinfo);
  if (!jpeg_start_decompress (&dec->cinfo)) {
    GST_WARNING_OBJECT (dec, "failed to start decompression cycle");
  }

  /* sanity checks to get safe and reasonable output */
  switch (dec->cinfo.jpeg_color_space) {
    case JCS_GRAYSCALE:
      if (dec->cinfo.num_components != 1)
        goto invalid_yuvrgbgrayscale;
      break;
    case JCS_RGB:
      if (dec->cinfo.num_components != 3 || dec->cinfo.max_v_samp_factor > 1 ||
          dec->cinfo.max_h_samp_factor > 1)
        goto invalid_yuvrgbgrayscale;
      break;
    case JCS_YCbCr:
      if (dec->cinfo.num_components != 3 ||
          r_v > 2 || r_v < dec->cinfo.comp_info[0].v_samp_factor ||
          r_v < dec->cinfo.comp_info[1].v_samp_factor ||
          r_h < dec->cinfo.comp_info[0].h_samp_factor ||
          r_h < dec->cinfo.comp_info[1].h_samp_factor)
        goto invalid_yuvrgbgrayscale;
      break;
    default:
      g_assert_not_reached ();
      break;
  }

  width = dec->cinfo.output_width;
  height = dec->cinfo.output_height;

  if (G_UNLIKELY (width < MIN_WIDTH || width > MAX_WIDTH ||
          height < MIN_HEIGHT || height > MAX_HEIGHT))
    goto wrong_size;

  gst_jpeg_dec_negotiate (dec, width, height, dec->cinfo.jpeg_color_space);

  state = gst_video_decoder_get_output_state (bdec);
  ret = gst_video_decoder_alloc_output_frame (bdec, frame);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto alloc_failed;

  if (!gst_video_frame_map (&vframe, &state->info, frame->output_buffer,
          GST_MAP_READWRITE))
    goto alloc_failed;

  GST_LOG_OBJECT (dec, "width %d, height %d", width, height);

  if (dec->cinfo.jpeg_color_space == JCS_RGB) {
    gst_jpeg_dec_decode_rgb (dec, &vframe);
  } else if (dec->cinfo.jpeg_color_space == JCS_GRAYSCALE) {
    gst_jpeg_dec_decode_grayscale (dec, &vframe);
  } else {
    GST_LOG_OBJECT (dec, "decompressing (reqired scanline buffer height = %u)",
        dec->cinfo.rec_outbuf_height);

    /* For some widths jpeglib requires more horizontal padding than I420 
     * provides. In those cases we need to decode into separate buffers and then
     * copy over the data into our final picture buffer, otherwise jpeglib might
     * write over the end of a line into the beginning of the next line,
     * resulting in blocky artifacts on the left side of the picture. */
    if (G_UNLIKELY (width % (dec->cinfo.max_h_samp_factor * DCTSIZE) != 0
            || dec->cinfo.comp_info[0].h_samp_factor != 2
            || dec->cinfo.comp_info[1].h_samp_factor != 1
            || dec->cinfo.comp_info[2].h_samp_factor != 1)) {
      GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
          "indirect decoding using extra buffer copy");
      gst_jpeg_dec_decode_indirect (dec, &vframe, r_v, r_h,
          dec->cinfo.num_components);
    } else {
      ret = gst_jpeg_dec_decode_direct (dec, &vframe);

      if (G_UNLIKELY (ret != GST_FLOW_OK))
        goto decode_direct_failed;
    }
  }

  gst_video_frame_unmap (&vframe);

  GST_LOG_OBJECT (dec, "decompressing finished");
  jpeg_finish_decompress (&dec->cinfo);

  /* reset error count on successful decode */
  dec->error_count = 0;

  gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map);
  ret = gst_video_decoder_finish_frame (bdec, frame);
  need_unmap = FALSE;

done:

exit:

  if (G_UNLIKELY (ret == GST_FLOW_ERROR)) {
    jpeg_abort_decompress (&dec->cinfo);
    ret = gst_jpeg_dec_post_error_or_warning (dec);
  }

  if (need_unmap)
    gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map);

  if (state)
    gst_video_codec_state_unref (state);

  return ret;

  /* special cases */
need_more_data:
  {
    GST_LOG_OBJECT (dec, "we need more data");
    ret = GST_FLOW_OK;
    goto exit;
  }
  /* ERRORS */
wrong_size:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Picture is too small or too big (%ux%u)", width, height);
    ret = GST_FLOW_ERROR;
    goto done;
  }
decode_error:
  {
    gchar err_msg[JMSG_LENGTH_MAX];

    dec->jerr.pub.format_message ((j_common_ptr) (&dec->cinfo), err_msg);

    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Decode error #%u: %s", code, err_msg);

    gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map);
    gst_video_decoder_drop_frame (bdec, frame);
    need_unmap = FALSE;

    ret = GST_FLOW_ERROR;
    goto done;
  }
decode_direct_failed:
  {
    /* already posted an error message */
    jpeg_abort_decompress (&dec->cinfo);
    goto done;
  }
alloc_failed:
  {
    const gchar *reason;

    reason = gst_flow_get_name (ret);

    GST_DEBUG_OBJECT (dec, "failed to alloc buffer, reason %s", reason);
    /* Reset for next time */
    jpeg_abort_decompress (&dec->cinfo);
    if (ret != GST_FLOW_EOS && ret != GST_FLOW_FLUSHING &&
        ret != GST_FLOW_NOT_LINKED) {
      gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
          "Buffer allocation failed, reason: %s", reason);
    }
    goto exit;
  }
components_not_supported:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "number of components not supported: %d (max 3)",
        dec->cinfo.num_components);
    ret = GST_FLOW_ERROR;
    goto done;
  }
unsupported_colorspace:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Picture has unknown or unsupported colourspace");
    ret = GST_FLOW_ERROR;
    goto done;
  }
invalid_yuvrgbgrayscale:
  {
    gst_jpeg_dec_set_error (dec, GST_FUNCTION, __LINE__,
        "Picture is corrupt or unhandled YUV/RGB/grayscale layout");
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
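
Note: the setjmp() near the top of Example #7 is libjpeg's standard error-recovery mechanism: the default error_exit() handler aborts the process, so decoders install one that longjmp()s back into the decode path instead. A standalone sketch of that wiring, following the pattern from libjpeg's example.c:

#include <setjmp.h>
#include <jpeglib.h>

struct my_error_mgr
{
  struct jpeg_error_mgr pub;    /* must be the first member */
  jmp_buf setjmp_buffer;        /* jump target for error recovery */
};

static void
my_error_exit (j_common_ptr cinfo)
{
  struct my_error_mgr *err = (struct my_error_mgr *) cinfo->err;
  longjmp (err->setjmp_buffer, 1);
}

static int
read_header_guarded (struct jpeg_decompress_struct *cinfo,
    struct my_error_mgr *jerr)
{
  cinfo->err = jpeg_std_error (&jerr->pub);
  jerr->pub.error_exit = my_error_exit;

  if (setjmp (jerr->setjmp_buffer))
    return 0;                   /* libjpeg reported a fatal error */

  return jpeg_read_header (cinfo, TRUE) == JPEG_HEADER_OK;
}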
Example #8
static gboolean
gst_openjpeg_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenJPEGEnc *self = GST_OPENJPEG_ENC (encoder);
  GstCaps *allowed_caps, *caps;
  GstStructure *s;
  const gchar *colorspace;
  gint ncomps;

  GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);

  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = gst_video_codec_state_ref (state);

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  allowed_caps = gst_caps_truncate (allowed_caps);
  s = gst_caps_get_structure (allowed_caps, 0);
  if (gst_structure_has_name (s, "image/jp2")) {
    self->codec_format = OPJ_CODEC_JP2;
    self->is_jp2c = FALSE;
  } else if (gst_structure_has_name (s, "image/x-j2c")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = TRUE;
  } else if (gst_structure_has_name (s, "image/x-jpc")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = FALSE;
  } else {
    g_return_val_if_reached (FALSE);
  }

  switch (state->info.finfo->format) {
    case GST_VIDEO_FORMAT_ARGB64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_ARGB:
      self->fill_image = fill_image_packed8_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_xRGB:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_Y444_10LE:
    case GST_VIDEO_FORMAT_Y444_10BE:
    case GST_VIDEO_FORMAT_I422_10LE:
    case GST_VIDEO_FORMAT_I422_10BE:
    case GST_VIDEO_FORMAT_I420_10LE:
    case GST_VIDEO_FORMAT_I420_10BE:
      self->fill_image = fill_image_planar16_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_Y444:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_Y41B:
    case GST_VIDEO_FORMAT_YUV9:
      self->fill_image = fill_image_planar8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_GRAY8:
      self->fill_image = fill_image_planar8_1;
      ncomps = 1;
      break;
    case GST_VIDEO_FORMAT_GRAY16_LE:
    case GST_VIDEO_FORMAT_GRAY16_BE:
      self->fill_image = fill_image_planar16_1;
      ncomps = 1;
      break;
    default:
      g_assert_not_reached ();
  }

  if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_YUV))
    colorspace = "sYUV";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_RGB))
    colorspace = "sRGB";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_GRAY))
    colorspace = "GRAY";
  else
    g_return_val_if_reached (FALSE);

  caps = gst_caps_new_simple (gst_structure_get_name (s),
      "colorspace", G_TYPE_STRING, colorspace,
      "num-components", G_TYPE_INT, ncomps, NULL);
  gst_caps_unref (allowed_caps);

  if (self->output_state)
    gst_video_codec_state_unref (self->output_state);
  self->output_state =
      gst_video_encoder_set_output_state (encoder, caps, state);

  gst_video_encoder_negotiate (GST_VIDEO_ENCODER (encoder));

  return TRUE;
}
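
Note: the first block of Example #8 probes what downstream prefers: gst_pad_get_allowed_caps() intersects the pad template with the peer caps, gst_caps_truncate() keeps only the first (most preferred) structure, and the structure name then selects the stream flavour. A sketch of that probe on its own; the helper name is made up:

#include <gst/video/gstvideoencoder.h>

/* Returns the preferred downstream media type, or NULL; caller g_free()s. */
static gchar *
preferred_src_media_type (GstVideoEncoder * encoder)
{
  GstCaps *allowed;
  gchar *name = NULL;

  allowed = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  if (allowed != NULL) {
    allowed = gst_caps_truncate (allowed);
    if (!gst_caps_is_empty (allowed))
      name = g_strdup (gst_structure_get_name
          (gst_caps_get_structure (allowed, 0)));
    gst_caps_unref (allowed);
  }
  return name;
}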
Example #9
static GstFlowReturn
gst_pngdec_caps_create_and_set (GstPngDec * pngdec)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint bpc = 0, color_type;
  png_uint_32 width, height;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;

  g_return_val_if_fail (GST_IS_PNGDEC (pngdec), GST_FLOW_ERROR);

  /* Get bits per channel */
  bpc = png_get_bit_depth (pngdec->png, pngdec->info);

  /* Get Color type */
  color_type = png_get_color_type (pngdec->png, pngdec->info);

  /* Add alpha channel if 16-bit depth, but not for GRAY images */
  if ((bpc > 8) && (color_type != PNG_COLOR_TYPE_GRAY)) {
    png_set_add_alpha (pngdec->png, 0xffff, PNG_FILLER_BEFORE);
    png_set_swap (pngdec->png);
  }
#if 0
  /* We used to have this HACK to reverse the outgoing bytes, but the problem
   * that originally required the hack seems to have been in videoconvert's
   * RGBA descriptions. It doesn't seem needed now that's fixed, but might
   * still be needed on big-endian systems, I'm not sure. J.S. 6/7/2007 */
  if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
    png_set_bgr (pngdec->png);
#endif

  /* Gray scale with alpha channel converted to RGB */
  if (color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
    GST_LOG_OBJECT (pngdec,
        "converting grayscale png with alpha channel to RGB");
    png_set_gray_to_rgb (pngdec->png);
  }

  /* Grayscale below 8 bits is upscaled to 8 bits */
  if ((color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
      (color_type == PNG_COLOR_TYPE_GRAY)) {
    if (bpc < 8) {              /* Convert to 8 bits */
      GST_LOG_OBJECT (pngdec, "converting grayscale image to 8 bits");
#if PNG_LIBPNG_VER < 10400
      png_set_gray_1_2_4_to_8 (pngdec->png);
#else
      png_set_expand_gray_1_2_4_to_8 (pngdec->png);
#endif
    }
  }

  /* Palette converted to RGB */
  if (color_type == PNG_COLOR_TYPE_PALETTE) {
    GST_LOG_OBJECT (pngdec, "converting palette png to RGB");
    png_set_palette_to_rgb (pngdec->png);
  }

  png_set_interlace_handling (pngdec->png);

  /* Update the info structure */
  png_read_update_info (pngdec->png, pngdec->info);

  /* Get IHDR header again after transformation settings */
  png_get_IHDR (pngdec->png, pngdec->info, &width, &height,
      &bpc, &pngdec->color_type, NULL, NULL, NULL);

  GST_LOG_OBJECT (pngdec, "this is a %dx%d PNG image", (gint) width,
      (gint) height);

  switch (pngdec->color_type) {
    case PNG_COLOR_TYPE_RGB:
      GST_LOG_OBJECT (pngdec, "we have no alpha channel, depth is 24 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGB;
      break;
    case PNG_COLOR_TYPE_RGB_ALPHA:
      GST_LOG_OBJECT (pngdec,
          "we have an alpha channel, depth is 32 or 64 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGBA;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_ARGB64;
      break;
    case PNG_COLOR_TYPE_GRAY:
      GST_LOG_OBJECT (pngdec,
          "We have an gray image, depth is 8 or 16 (be) bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_GRAY8;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_GRAY16_BE;
      break;
    default:
      break;
  }

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ELEMENT_ERROR (pngdec, STREAM, NOT_IMPLEMENTED, (NULL),
        ("pngdec does not support this color type"));
    ret = GST_FLOW_NOT_SUPPORTED;
    goto beach;
  }

  /* Check if output state changed */
  if (pngdec->output_state) {
    GstVideoInfo *info = &pngdec->output_state->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        GST_VIDEO_INFO_FORMAT (info) == format) {
      goto beach;
    }
    gst_video_codec_state_unref (pngdec->output_state);
  }

  pngdec->output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (pngdec), format,
      width, height, pngdec->input_state);
  gst_video_decoder_negotiate (GST_VIDEO_DECODER (pngdec));
  GST_DEBUG ("Final %d %d", GST_VIDEO_INFO_WIDTH (&pngdec->output_state->info),
      GST_VIDEO_INFO_HEIGHT (&pngdec->output_state->info));

beach:
  return ret;
}
Example #10
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstVideoCodecState *state, *ref_state;
  GstVideoInfo *vi;
  GstVideoFormat format = GST_VIDEO_FORMAT_I420;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  GstCapsFeatures *features = NULL;
  GstVaapiCapsFeature feature;

  feature =
      gst_vaapi_find_preferred_caps_feature (GST_VIDEO_DECODER_SRC_PAD (vdec),
      GST_VIDEO_INFO_FORMAT (&ref_state->info), &format);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

  switch (feature) {
#if (USE_GLX || USE_EGL)
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
      features =
          gst_caps_features_new
          (GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL);
      break;
#endif
#if GST_CHECK_VERSION(1,3,1)
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:
      features =
          gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE, NULL);
      break;
#endif
    default:
      break;
  }

  state = gst_video_decoder_set_output_state (vdec, format,
      ref_state->info.width, ref_state->info.height, ref_state);
  if (!state || state->info.width == 0 || state->info.height == 0)
    return FALSE;

  vi = &state->info;

  state->caps = gst_video_info_to_caps (vi);
  if (features)
    gst_caps_set_features (state->caps, 0, features);
  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  gint fps_n = GST_VIDEO_INFO_FPS_N (vi);
  gint fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
Example #11
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstPad *const srcpad = GST_VIDEO_DECODER_SRC_PAD (vdec);
  GstCaps *allowed;
  GstVideoCodecState *state, *ref_state;
  GstVaapiCapsFeature feature;
  GstCapsFeatures *features;
  GstCaps *allocation_caps;
  GstVideoInfo *vi;
  GstVideoFormat format;
  GstClockTime latency;
  gint fps_d, fps_n;
  guint width, height;
  const gchar *format_str, *feature_str;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  format = GST_VIDEO_INFO_FORMAT (&decode->decoded_info);
  allowed = gst_vaapidecode_get_allowed_srcpad_caps (decode);
  feature = gst_vaapi_find_preferred_caps_feature (srcpad, allowed, &format);
  gst_caps_unref (allowed);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

#if (!USE_GLX && !USE_EGL)
  /* This is a very pathological situation. Should not happen. */
  if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
    return FALSE;
#endif

  if ((feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY ||
          feature == GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      && format != GST_VIDEO_INFO_FORMAT (&decode->decoded_info)) {
    GST_FIXME_OBJECT (decode, "validate if driver can convert from %s to %s",
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT
            (&decode->decoded_info)), gst_video_format_to_string (format));
  }

  width = decode->display_width;
  height = decode->display_height;

  if (!width || !height) {
    width = GST_VIDEO_INFO_WIDTH (&ref_state->info);
    height = GST_VIDEO_INFO_HEIGHT (&ref_state->info);
  }

  state = gst_video_decoder_set_output_state (vdec, format, width, height,
      ref_state);
  if (!state)
    return FALSE;

  if (GST_VIDEO_INFO_WIDTH (&state->info) == 0
      || GST_VIDEO_INFO_HEIGHT (&state->info) == 0) {
    gst_video_codec_state_unref (state);
    return FALSE;
  }

  vi = &state->info;
  state->caps = gst_video_info_to_caps (vi);

  switch (feature) {
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:{
      GstStructure *structure = gst_caps_get_structure (state->caps, 0);

      /* Remove chroma-site and colorimetry from src caps,
       * which is unnecessary on downstream if using VASurface
       */
      gst_structure_remove_fields (structure, "chroma-site", "colorimetry",
          NULL);

      feature_str = gst_vaapi_caps_feature_to_string (feature);
      features = gst_caps_features_new (feature_str, NULL);
      gst_caps_set_features (state->caps, 0, features);
      break;
    }
    default:
      break;
  }

  /* Allocation query is different from pad's caps */
  allocation_caps = NULL;
  if (GST_VIDEO_INFO_WIDTH (&decode->decoded_info) != width
      || GST_VIDEO_INFO_HEIGHT (&decode->decoded_info) != height) {
    allocation_caps = gst_caps_copy (state->caps);
    format_str = gst_video_format_to_string (format);
    gst_caps_set_simple (allocation_caps,
        "width", G_TYPE_INT, GST_VIDEO_INFO_WIDTH (&decode->decoded_info),
        "height", G_TYPE_INT, GST_VIDEO_INFO_HEIGHT (&decode->decoded_info),
        "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (decode, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
  }
  gst_caps_replace (&state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  fps_n = GST_VIDEO_INFO_FPS_N (vi);
  fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
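
Note: Examples #10 and #11 end with the same latency rule: report two frame durations, falling back to 25/1 when the stream carries no usable framerate. Extracted as a sketch:

#include <gst/gst.h>

static GstClockTime
two_frame_latency (gint fps_n, gint fps_d)
{
  if (fps_n <= 0 || fps_d <= 0) {
    /* no usable framerate in the caps; assume 25/1 as the examples do */
    fps_n = 25;
    fps_d = 1;
  }
  return gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
}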
Example #12
static gboolean
gst_mpeg2dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstMpeg2dec *dec = GST_MPEG2DEC (decoder);
  GstVideoCodecState *state;
  GstBufferPool *pool;
  guint size, min, max;
  GstStructure *config;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean update_allocator;

  /* Set allocation parameters to guarantee 16-byte aligned output buffers */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  params.align = MAX (params.align, 15);

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);
  if (allocator)
    gst_object_unref (allocator);

  /* Now chain up to the parent class to guarantee that we can
   * get a buffer pool from the query */
  if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
          query))
    return FALSE;

  state = gst_video_decoder_get_output_state (decoder);

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);

  dec->has_cropping = FALSE;
  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
    dec->has_cropping =
        gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE,
        NULL);
  }

  if (dec->has_cropping) {
    GstCaps *caps;

    /* Calculate uncropped size */
    size = MAX (size, dec->decoded_info.size);
    caps = gst_video_info_to_caps (&dec->decoded_info);
    gst_buffer_pool_config_set_params (config, caps, size, min, max);
    gst_caps_unref (caps);
  }

  gst_buffer_pool_set_config (pool, config);

  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);

  gst_object_unref (pool);
  gst_video_codec_state_unref (state);

  return TRUE;
}
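
Note: GstAllocationParams.align is a bitmask, so align = 15 in Example #12 requests 16-byte-aligned output buffers. A sketch of just the alignment part of that decide_allocation() vfunc:

#include <gst/gst.h>

static void
request_16byte_alignment (GstQuery * query)
{
  GstAllocator *allocator = NULL;
  GstAllocationParams params;

  if (gst_query_get_n_allocation_params (query) > 0) {
    /* update the downstream-proposed parameters in place */
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    params.align = MAX (params.align, 15);
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  } else {
    /* nothing proposed; add our own parameters */
    gst_allocation_params_init (&params);
    params.align = 15;
    gst_query_add_allocation_param (query, NULL, &params);
  }

  if (allocator)
    gst_object_unref (allocator);
}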
Example #13
static GstFlowReturn
handle_sequence (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime latency;
  const mpeg2_sequence_t *sequence;
  GstVideoCodecState *state;
  GstVideoInfo *dinfo = &mpeg2dec->decoded_info;
  GstVideoInfo *vinfo;
  GstVideoFormat format;

  sequence = info->sequence;

  if (sequence->frame_period == 0)
    goto invalid_frame_period;

  /* mpeg2 video can only be from 16x16 to 4096x4096. Everything
   * else is a corrupted file */
  if (sequence->width > 4096 || sequence->width < 16 ||
      sequence->height > 4096 || sequence->height < 16)
    goto invalid_size;

  GST_DEBUG_OBJECT (mpeg2dec,
      "widthxheight: %dx%d , decoded_widthxheight: %dx%d",
      sequence->picture_width, sequence->picture_height, sequence->width,
      sequence->height);

  if (sequence->picture_width != sequence->width ||
      sequence->picture_height != sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "we need to crop");
    mpeg2dec->need_cropping = TRUE;
  } else {
    GST_DEBUG_OBJECT (mpeg2dec, "no cropping needed");
    mpeg2dec->need_cropping = FALSE;
  }

  /* get subsampling */
  if (sequence->chroma_width < sequence->width) {
    /* horizontally subsampled */
    if (sequence->chroma_height < sequence->height) {
      /* and vertically subsampled */
      format = GST_VIDEO_FORMAT_I420;
    } else {
      format = GST_VIDEO_FORMAT_Y42B;
    }
  } else {
    /* not subsampled */
    format = GST_VIDEO_FORMAT_Y444;
  }

  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (mpeg2dec),
      format, sequence->picture_width, sequence->picture_height,
      mpeg2dec->input_state);
  vinfo = &state->info;

  /* If we don't have a valid upstream PAR override it */
  if (GST_VIDEO_INFO_PAR_N (vinfo) == 1 &&
      GST_VIDEO_INFO_PAR_D (vinfo) == 1 &&
      sequence->pixel_width != 0 && sequence->pixel_height != 0) {
#if MPEG2_RELEASE >= MPEG2_VERSION(0,5,0)
    guint pixel_width, pixel_height;
    if (mpeg2_guess_aspect (sequence, &pixel_width, &pixel_height)) {
      vinfo->par_n = pixel_width;
      vinfo->par_d = pixel_height;
    }
#else
    vinfo->par_n = sequence->pixel_width;
    vinfo->par_d = sequence->pixel_height;
#endif
    GST_DEBUG_OBJECT (mpeg2dec, "Setting PAR %d x %d",
        vinfo->par_n, vinfo->par_d);
  }
  vinfo->fps_n = 27000000;
  vinfo->fps_d = sequence->frame_period;

  if (!(sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE))
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  vinfo->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
  vinfo->colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;

  if (sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION) {
    /* do color description */
    switch (sequence->colour_primaries) {
      case 1:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
        break;
      case 4:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
        break;
      case 5:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
        break;
      case 6:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
        break;
      case 7:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
        break;
    }
    /* matrix coefficients */
    switch (sequence->matrix_coefficients) {
      case 1:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
        break;
      case 4:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_FCC;
        break;
      case 5:
      case 6:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
        break;
      case 7:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
        break;
    }
    /* transfer characteristics */
    switch (sequence->transfer_characteristics) {
      case 1:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 4:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA22;
        break;
      case 5:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA28;
        break;
      case 6:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 7:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
        break;
      case 8:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 9-255 reserved */
      default:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
        break;
    }
  }

  GST_DEBUG_OBJECT (mpeg2dec,
      "sequence flags: %d, frame period: %d, frame rate: %d/%d",
      sequence->flags, sequence->frame_period, vinfo->fps_n, vinfo->fps_d);
  GST_DEBUG_OBJECT (mpeg2dec, "profile: %02x, colour_primaries: %d",
      sequence->profile_level_id, sequence->colour_primaries);
  GST_DEBUG_OBJECT (mpeg2dec, "transfer chars: %d, matrix coef: %d",
      sequence->transfer_characteristics, sequence->matrix_coefficients);
  GST_DEBUG_OBJECT (mpeg2dec,
      "FLAGS: CONSTRAINED_PARAMETERS:%d, PROGRESSIVE_SEQUENCE:%d",
      sequence->flags & SEQ_FLAG_CONSTRAINED_PARAMETERS,
      sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE);
  GST_DEBUG_OBJECT (mpeg2dec, "FLAGS: LOW_DELAY:%d, COLOUR_DESCRIPTION:%d",
      sequence->flags & SEQ_FLAG_LOW_DELAY,
      sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION);

  /* we store the codec size before cropping */
  *dinfo = *vinfo;
  gst_video_info_set_format (dinfo, format, sequence->width, sequence->height);

  /* Mpeg2dec has 2 frame latency to produce a picture and 1 frame latency in
   * its parser */
  latency = gst_util_uint64_scale (3, vinfo->fps_d, vinfo->fps_n);
  gst_video_decoder_set_latency (GST_VIDEO_DECODER (mpeg2dec), latency,
      latency);

  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (mpeg2dec)))
    goto negotiation_fail;

  gst_video_codec_state_unref (state);

  mpeg2_custom_fbuf (mpeg2dec->decoder, 1);

  init_dummybuf (mpeg2dec);

  /* Pump in some null buffers, because otherwise libmpeg2 doesn't
   * initialise the discard_fbuf->id */
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  gst_mpeg2dec_clear_buffers (mpeg2dec);

  return ret;

invalid_frame_period:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Frame period is 0!");
    return GST_FLOW_ERROR;
  }
invalid_size:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Invalid frame dimensions: %d x %d",
        sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }

negotiation_fail:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Failed to negotiate with downstream");
    return GST_FLOW_ERROR;
  }
}
Example #14
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize.
     */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = frame->input_buffer;
      frame->input_buffer = NULL;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information, there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task, when it quits, the task will disable input
     * processing to unlock input if draining, or prevent potential block */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (frame->input_buffer) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
            pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }

    /* No need to keep input around */
    gst_buffer_replace (&frame->input_buffer, NULL);
  }

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }

start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("Maybe be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
Example #15
static gboolean
gst_amc_video_dec_set_format (GstVideoDecoder * decoder,
    GstVideoCodecState * state)
{
  GstAmcVideoDec *self;
  GstAmcFormat *format;
  const gchar *mime;
  gboolean is_format_change = FALSE;
  gboolean needs_disable = FALSE;
  gchar *format_string;
  guint8 *codec_data = NULL;
  gsize codec_data_size = 0;
  GError *err = NULL;

  self = GST_AMC_VIDEO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Setting new caps %" GST_PTR_FORMAT, state->caps);

  /* Check if the caps change is a real format change or if only irrelevant
   * parts of the caps have changed or nothing at all.
   */
  is_format_change |= self->color_format_info.width != state->info.width;
  is_format_change |= self->color_format_info.height != state->info.height;
  if (state->codec_data) {
    GstMapInfo cminfo;

    gst_buffer_map (state->codec_data, &cminfo, GST_MAP_READ);
    codec_data = g_memdup (cminfo.data, cminfo.size);
    codec_data_size = cminfo.size;

    is_format_change |= (!self->codec_data
        || self->codec_data_size != codec_data_size
        || memcmp (self->codec_data, codec_data, codec_data_size) != 0);
    gst_buffer_unmap (state->codec_data, &cminfo);
  } else if (self->codec_data) {
    is_format_change |= TRUE;
  }

  needs_disable = self->started;

  /* If the component is not started and a real format change happens
   * we have to restart the component. If no real format change
   * happened we can just exit here.
   */
  if (needs_disable && !is_format_change) {
    g_free (codec_data);
    codec_data = NULL;
    codec_data_size = 0;

    /* Framerate or something minor changed */
    self->input_state_changed = TRUE;
    if (self->input_state)
      gst_video_codec_state_unref (self->input_state);
    self->input_state = gst_video_codec_state_ref (state);
    GST_DEBUG_OBJECT (self,
        "Already running and caps did not change the format");
    return TRUE;
  }

  if (needs_disable && is_format_change) {
    gst_amc_video_dec_drain (self);
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    gst_amc_video_dec_stop (GST_VIDEO_DECODER (self));
    GST_VIDEO_DECODER_STREAM_LOCK (self);
    gst_amc_video_dec_close (GST_VIDEO_DECODER (self));
    if (!gst_amc_video_dec_open (GST_VIDEO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to open codec again");
      return FALSE;
    }

    if (!gst_amc_video_dec_start (GST_VIDEO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to start codec again");
    }
  }
  /* srcpad task is not running at this point */
  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = NULL;

  g_free (self->codec_data);
  self->codec_data = codec_data;
  self->codec_data_size = codec_data_size;

  mime = caps_to_mime (state->caps);
  if (!mime) {
    GST_ERROR_OBJECT (self, "Failed to convert caps to mime");
    return FALSE;
  }

  format =
      gst_amc_format_new_video (mime, state->info.width, state->info.height,
      &err);
  if (!format) {
    GST_ERROR_OBJECT (self, "Failed to create video format");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    return FALSE;
  }

  /* FIXME: This buffer needs to be valid until the codec is stopped again */
  if (self->codec_data) {
    gst_amc_format_set_buffer (format, "csd-0", self->codec_data,
        self->codec_data_size, &err);
    if (err)
      GST_ELEMENT_WARNING_FROM_ERROR (self, err);
  }

  format_string = gst_amc_format_to_string (format, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (self, err);
  GST_DEBUG_OBJECT (self, "Configuring codec with format: %s",
      GST_STR_NULL (format_string));
  g_free (format_string);

  if (!gst_amc_codec_configure (self->codec, format, 0, &err)) {
    GST_ERROR_OBJECT (self, "Failed to configure codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    return FALSE;
  }

  gst_amc_format_free (format);

  if (!gst_amc_codec_start (self->codec, &err)) {
    GST_ERROR_OBJECT (self, "Failed to start codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    return FALSE;
  }

  self->started = TRUE;
  self->input_state = gst_video_codec_state_ref (state);
  self->input_state_changed = TRUE;

  /* Start the srcpad loop again */
  self->flushing = FALSE;
  self->downstream_flow_ret = GST_FLOW_OK;
  gst_pad_start_task (GST_VIDEO_DECODER_SRC_PAD (self),
      (GstTaskFunction) gst_amc_video_dec_loop, decoder, NULL);

  return TRUE;
}
Example #16
static GstFlowReturn
gst_openjpeg_dec_negotiate (GstOpenJPEGDec * self, opj_image_t * image)
{
  GstVideoFormat format;
  gint width, height;

  if (image->color_space == CLRSPC_UNKNOWN
      || image->color_space == CLRSPC_UNSPECIFIED)
    image->color_space = self->color_space;

  switch (image->color_space) {
    case CLRSPC_SRGB:
      if (image->numcomps == 4) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1 ||
            image->comps[1].dx != 1 || image->comps[1].dy != 1 ||
            image->comps[2].dx != 1 || image->comps[2].dy != 1 ||
            image->comps[3].dx != 1 || image->comps[3].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for RGB not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_packed8_4;
          format = GST_VIDEO_FORMAT_ARGB;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_packed16_4;
          format = GST_VIDEO_FORMAT_ARGB64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d", image->comps[3].prec);
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else if (image->numcomps == 3) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1 ||
            image->comps[1].dx != 1 || image->comps[1].dy != 1 ||
            image->comps[2].dx != 1 || image->comps[2].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for RGB not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_packed8_3;
          format = GST_VIDEO_FORMAT_ARGB;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_packed16_3;
          format = GST_VIDEO_FORMAT_ARGB64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of RGB components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    case CLRSPC_GRAY:
      if (image->numcomps == 1) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for GRAY not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_planar8_1;
          format = GST_VIDEO_FORMAT_GRAY8;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_planar16_1;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
          format = GST_VIDEO_FORMAT_GRAY16_LE;
#else
          format = GST_VIDEO_FORMAT_GRAY16_BE;
#endif
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of GRAY components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    case CLRSPC_SYCC:
      if (image->numcomps != 3 && image->numcomps != 4) {
        GST_ERROR_OBJECT (self, "Unsupported number of YUV components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->comps[0].dx != 1 || image->comps[0].dy != 1) {
        GST_ERROR_OBJECT (self, "Sub-sampling of luma plane not supported");
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->comps[1].dx != image->comps[2].dx ||
          image->comps[1].dy != image->comps[2].dy) {
        GST_ERROR_OBJECT (self,
            "Different sub-sampling of chroma planes not supported");
        return GST_FLOW_ERROR;
      }

      if (image->numcomps == 4) {
        if (image->comps[3].dx != 1 || image->comps[3].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling of alpha plane not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_planar8_4_generic;
          format = GST_VIDEO_FORMAT_AYUV;
        } else if (image->comps[3].prec <= 16) {
          self->fill_frame = fill_frame_planar16_4_generic;
          format = GST_VIDEO_FORMAT_AYUV64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d", image->comps[0].prec);
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else if (image->numcomps == 3) {
        if (get_highest_prec (image) == 8) {
          if (image->comps[1].dx == 1 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y444;
          } else if (image->comps[1].dx == 2 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y42B;
          } else if (image->comps[1].dx == 2 && image->comps[1].dy == 2) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_I420;
          } else if (image->comps[1].dx == 4 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y41B;
          } else if (image->comps[1].dx == 4 && image->comps[1].dy == 4) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_YUV9;
          } else {
            self->fill_frame = fill_frame_planar8_3_generic;
            format = GST_VIDEO_FORMAT_AYUV;
          }
        } else if (get_highest_prec (image) <= 16) {
          if (image->comps[0].prec == 10 &&
              image->comps[1].prec == 10 && image->comps[2].prec == 10) {
            if (image->comps[1].dx == 1 && image->comps[1].dy == 1) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_Y444_10LE;
#else
              format = GST_VIDEO_FORMAT_Y444_10BE;
#endif
            } else if (image->comps[1].dx == 2 && image->comps[1].dy == 1) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_I422_10LE;
#else
              format = GST_VIDEO_FORMAT_I422_10BE;
#endif
            } else if (image->comps[1].dx == 2 && image->comps[1].dy == 2) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_I420_10LE;
#else
              format = GST_VIDEO_FORMAT_I420_10BE;
#endif
            } else {
              self->fill_frame = fill_frame_planar16_3_generic;
              format = GST_VIDEO_FORMAT_AYUV64;
            }
          } else {
            self->fill_frame = fill_frame_planar16_3_generic;
            format = GST_VIDEO_FORMAT_AYUV64;
          }
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of YUV components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    default:
      GST_ERROR_OBJECT (self, "Unsupported colorspace %d", image->color_space);
      return GST_FLOW_NOT_NEGOTIATED;
  }

  width = image->x1 - image->x0;
  height = image->y1 - image->y0;

  if (!self->output_state ||
      self->output_state->info.finfo->format != format ||
      self->output_state->info.width != width ||
      self->output_state->info.height != height) {
    if (self->output_state)
      gst_video_codec_state_unref (self->output_state);
    self->output_state =
        gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self), format,
        width, height, self->input_state);

    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self)))
      return GST_FLOW_NOT_NEGOTIATED;
  }

  return GST_FLOW_OK;
}
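
Note: the tail of Example #16 guards against spurious renegotiation: a new output state is installed only when the format or the dimensions actually changed. The check, extracted as a predicate sketch:

#include <gst/video/video.h>

static gboolean
output_state_matches (const GstVideoCodecState * state,
    GstVideoFormat format, gint width, gint height)
{
  return state != NULL
      && GST_VIDEO_INFO_FORMAT (&state->info) == format
      && GST_VIDEO_INFO_WIDTH (&state->info) == width
      && GST_VIDEO_INFO_HEIGHT (&state->info) == height;
}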
Example #17
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean processed = FALSE;
  GstBuffer *tmp;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps,
          &error))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;
    GstCaps *acquired_caps, *available_caps, *caps, *filter;
    GstStructure *st;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize.
     */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = gst_buffer_ref (frame->input_buffer);
      processed = TRUE;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    /* For decoders G_FMT returns coded size, G_SELECTION returns visible size
     * in the compose rectangle. gst_v4l2_object_acquire_format() checks both
     * and returns the visible size as width/height and the coded size as
     * padding. */
    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    /* Create caps from the acquired format, remove the format field */
    acquired_caps = gst_video_info_to_caps (&info);
    st = gst_caps_get_structure (acquired_caps, 0);
    gst_structure_remove_field (st, "format");

    /* Probe currently available pixel formats */
    available_caps = gst_v4l2_object_probe_caps (self->v4l2capture, NULL);
    available_caps = gst_caps_make_writable (available_caps);

    /* Replace coded size with visible size, we want to negotiate visible size
     * with downstream, not coded size. */
    gst_caps_map_in_place (available_caps, gst_v4l2_video_remove_padding, self);

    filter = gst_caps_intersect_full (available_caps, acquired_caps,
        GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (acquired_caps);
    gst_caps_unref (available_caps);
    caps = gst_pad_peer_query_caps (decoder->srcpad, filter);
    gst_caps_unref (filter);

    GST_DEBUG_OBJECT (self, "Possible decoded caps: %" GST_PTR_FORMAT, caps);
    if (gst_caps_is_empty (caps)) {
      gst_caps_unref (caps);
      goto not_negotiated;
    }

    /* Fixate pixel format */
    caps = gst_caps_fixate (caps);

    GST_DEBUG_OBJECT (self, "Chosen decoded caps: %" GST_PTR_FORMAT, caps);

    /* Try to set negotiated format, on success replace acquired format */
    if (gst_v4l2_object_set_format (self->v4l2capture, caps, &error))
      gst_video_info_from_caps (&info, caps);
    else
      gst_v4l2_clear_error (&error);
    gst_caps_unref (caps);

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information, there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task. When it quits, the task will disable input
     * processing, to unblock input while draining or to prevent a potential
     * deadlock */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (!processed) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
            pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }
  }

  /* No need to keep the input buffer around */
  tmp = frame->input_buffer;
  frame->input_buffer = gst_buffer_new ();
  gst_buffer_copy_into (frame->input_buffer, tmp,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
      GST_BUFFER_COPY_META, 0, 0);
  gst_buffer_unref (tmp);

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    gst_v4l2_error (self, &error);
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }

start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("May be caused by insufficient memory or a failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
Example #18
static GstFlowReturn
gst_pnmdec_parse (GstVideoDecoder * decoder, GstVideoCodecFrame * frame,
    GstAdapter * adapter, gboolean at_eos)
{
  gsize size;
  GstPnmdec *s = GST_PNMDEC (decoder);
  GstFlowReturn r = GST_FLOW_OK;
  guint offset = 0;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  const guint8 *raw_data;
  GstVideoCodecState *output_state;

  GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);

  size = gst_adapter_available (adapter);
  if (size < 8) {
    goto need_more_data;
  }
  raw_data = gst_adapter_map (adapter, size);

  if (s->mngr.info.fields != GST_PNM_INFO_FIELDS_ALL) {
    GstPnmInfoMngrResult res;

    res = gst_pnm_info_mngr_scan (&s->mngr, raw_data, size);

    switch (res) {
      case GST_PNM_INFO_MNGR_RESULT_FAILED:
        r = GST_FLOW_ERROR;
        goto out;
      case GST_PNM_INFO_MNGR_RESULT_READING:
        r = GST_FLOW_OK;
        goto out;
      case GST_PNM_INFO_MNGR_RESULT_FINISHED:
        switch (s->mngr.info.type) {
          case GST_PNM_TYPE_BITMAP:
            if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
              r = GST_FLOW_ERROR;
              goto out;
            }
            s->size = s->mngr.info.width * s->mngr.info.height * 1;
            format = GST_VIDEO_FORMAT_GRAY8;
            break;
          case GST_PNM_TYPE_GRAYMAP:
            s->size = s->mngr.info.width * s->mngr.info.height * 1;
            format = GST_VIDEO_FORMAT_GRAY8;
            break;
          case GST_PNM_TYPE_PIXMAP:
            s->size = s->mngr.info.width * s->mngr.info.height * 3;
            format = GST_VIDEO_FORMAT_RGB;
            break;
        }
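        /* gst_video_decoder_set_output_state() returns a reference owned by
         * the caller, which is dropped again right below */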
        output_state =
            gst_video_decoder_set_output_state (GST_VIDEO_DECODER (s), format,
            s->mngr.info.width, s->mngr.info.height, s->input_state);
        gst_video_codec_state_unref (output_state);
        if (gst_video_decoder_negotiate (GST_VIDEO_DECODER (s)) == FALSE) {
          r = GST_FLOW_NOT_NEGOTIATED;
          goto out;
        }

        if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
          s->mngr.data_offset++;
          /* The size of the ASCII input data cannot be known in advance,
             so we parse it, keep count of the pixels decoded so far and
             decide from that when a full frame is available */
          s->buf = gst_buffer_new_and_alloc (s->size);
        }
        offset = s->mngr.data_offset;
        gst_adapter_flush (adapter, offset);
        size = size - offset;
    }
  }

  if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
    /* Parse the ASCII data and update s->current_size with the number of
       bytes actually consumed from the input */
    r = gst_pnmdec_parse_ascii (s, raw_data + offset, size);
  } else {
    /* A bitmap packs 8 pixels into each byte */
    if (s->mngr.info.type == GST_PNM_TYPE_BITMAP)
      s->current_size += (size * 8);
    else
      s->current_size += size;
  }

  gst_video_decoder_add_to_frame (decoder, size);
  if (s->size <= s->current_size) {
    goto have_full_frame;
  }

need_more_data:
  return GST_VIDEO_DECODER_FLOW_NEED_DATA;

have_full_frame:
  return gst_video_decoder_have_frame (decoder);

out:
  return r;
}
Example #19
static GstFlowReturn
gst_mpeg2dec_crop_buffer (GstMpeg2dec * dec, GstVideoCodecFrame * in_frame,
    GstVideoFrame * input_vframe)
{
  GstVideoCodecState *state;
  GstVideoInfo *info;
  GstVideoInfo *dinfo;
  guint c, n_planes;
  GstVideoFrame output_frame;
  GstFlowReturn ret;

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  info = &state->info;
  dinfo = &dec->decoded_info;

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
      "Copying input buffer %ux%u (%" G_GSIZE_FORMAT ") to output buffer "
      "%ux%u (%" G_GSIZE_FORMAT ")", dinfo->width, dinfo->height,
      dinfo->size, info->width, info->height, info->size);

  ret =
      gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (dec),
      in_frame);
  if (ret != GST_FLOW_OK)
    goto beach;

  if (!gst_video_frame_map (&output_frame, info, in_frame->output_buffer,
          GST_MAP_WRITE))
    goto map_fail;

  n_planes = GST_VIDEO_FRAME_N_PLANES (&output_frame);
  for (c = 0; c < n_planes; c++) {
    guint w, h, j;
    guint8 *sp, *dp;
    gint ss, ds;

    sp = GST_VIDEO_FRAME_PLANE_DATA (input_vframe, c);
    dp = GST_VIDEO_FRAME_PLANE_DATA (&output_frame, c);

    ss = GST_VIDEO_FRAME_PLANE_STRIDE (input_vframe, c);
    ds = GST_VIDEO_FRAME_PLANE_STRIDE (&output_frame, c);

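    /* use the smaller of the two strides as the copy width so that neither
     * the source nor the destination row is overrun */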
    w = MIN (ABS (ss), ABS (ds));
    h = GST_VIDEO_FRAME_COMP_HEIGHT (&output_frame, c);

    GST_CAT_DEBUG (GST_CAT_PERFORMANCE, "copy plane %u, w:%u h:%u ", c, w, h);

    for (j = 0; j < h; j++) {
      memcpy (dp, sp, w);
      dp += ds;
      sp += ss;
    }
  }

  gst_video_frame_unmap (&output_frame);

  GST_BUFFER_FLAGS (in_frame->output_buffer) =
      GST_BUFFER_FLAGS (input_vframe->buffer);

beach:
  gst_video_codec_state_unref (state);

  return ret;

map_fail:
  {
    GST_ERROR_OBJECT (dec, "Failed to map output frame");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
Example #20
static gboolean
gst_openh264enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenh264Enc *openh264enc = GST_OPENH264ENC (encoder);
  gchar *debug_caps;
  guint width, height, fps_n, fps_d;
  SEncParamExt enc_params;
  SliceModeEnum slice_mode = SM_SINGLE_SLICE;
  guint n_slices = 1;
  gint ret;
  GstCaps *outcaps;
  GstVideoCodecState *output_state;
  int video_format = videoFormatI420;

  openh264enc->frame_count = 0;

  debug_caps = gst_caps_to_string (state->caps);
  GST_DEBUG_OBJECT (openh264enc, "gst_openh264enc_set_format called, caps: %s",
      debug_caps);
  g_free (debug_caps);

  gst_openh264enc_stop (encoder);

  if (openh264enc->input_state) {
    gst_video_codec_state_unref (openh264enc->input_state);
  }
  openh264enc->input_state = gst_video_codec_state_ref (state);

  width = GST_VIDEO_INFO_WIDTH (&state->info);
  height = GST_VIDEO_INFO_HEIGHT (&state->info);
  fps_n = GST_VIDEO_INFO_FPS_N (&state->info);
  fps_d = GST_VIDEO_INFO_FPS_D (&state->info);

  if (openh264enc->encoder != NULL) {
    openh264enc->encoder->Uninitialize ();
    WelsDestroySVCEncoder (openh264enc->encoder);
    openh264enc->encoder = NULL;
  }
  WelsCreateSVCEncoder (&openh264enc->encoder);
  unsigned int uiTraceLevel = WELS_LOG_ERROR;
  openh264enc->encoder->SetOption (ENCODER_OPTION_TRACE_LEVEL, &uiTraceLevel);

  openh264enc->encoder->GetDefaultParams (&enc_params);

  enc_params.iUsageType = openh264enc->usage_type;
  enc_params.iPicWidth = width;
  enc_params.iPicHeight = height;
  enc_params.iTargetBitrate = openh264enc->bitrate;
  enc_params.iMaxBitrate = openh264enc->max_bitrate;
  enc_params.iRCMode = openh264enc->rate_control;
  enc_params.iTemporalLayerNum = 1;
  enc_params.iSpatialLayerNum = 1;
  enc_params.iLtrMarkPeriod = 30;
  enc_params.iMultipleThreadIdc = openh264enc->multi_thread;
  enc_params.bEnableDenoise = openh264enc->enable_denoise;
  enc_params.iComplexityMode = openh264enc->complexity;
  enc_params.uiIntraPeriod = openh264enc->gop_size;
  enc_params.bEnableBackgroundDetection = openh264enc->background_detection;
  enc_params.bEnableAdaptiveQuant = openh264enc->adaptive_quantization;
  enc_params.bEnableSceneChangeDetect = openh264enc->scene_change_detection;
  enc_params.bEnableFrameSkip = openh264enc->enable_frame_skip;
  enc_params.bEnableLongTermReference = 0;
#if OPENH264_MINOR >= 4
  enc_params.eSpsPpsIdStrategy = CONSTANT_ID;
#else
  enc_params.bEnableSpsPpsIdAddition = 0;
#endif
  enc_params.bPrefixNalAddingCtrl = 0;
  enc_params.fMaxFrameRate = fps_n * 1.0 / fps_d;
  enc_params.iLoopFilterDisableIdc = openh264enc->deblocking_mode;
  enc_params.sSpatialLayers[0].uiProfileIdc = PRO_BASELINE;
  enc_params.sSpatialLayers[0].iVideoWidth = enc_params.iPicWidth;
  enc_params.sSpatialLayers[0].iVideoHeight = enc_params.iPicHeight;
  enc_params.sSpatialLayers[0].fFrameRate = fps_n * 1.0 / fps_d;
  enc_params.sSpatialLayers[0].iSpatialBitrate = enc_params.iTargetBitrate;
  enc_params.sSpatialLayers[0].iMaxSpatialBitrate = enc_params.iMaxBitrate;

  if (openh264enc->slice_mode == GST_OPENH264_SLICE_MODE_N_SLICES) {
    if (openh264enc->num_slices == 1)
      slice_mode = SM_SINGLE_SLICE;
    else
      slice_mode = SM_FIXEDSLCNUM_SLICE;
    n_slices = openh264enc->num_slices;
  } else if (openh264enc->slice_mode == GST_OPENH264_SLICE_MODE_AUTO) {
#if OPENH264_MAJOR == 1 && OPENH264_MINOR < 6
    slice_mode = SM_AUTO_SLICE;
#else
    slice_mode = SM_FIXEDSLCNUM_SLICE;
    n_slices = 0;
#endif
  } else {
    GST_ERROR_OBJECT (openh264enc, "unexpected slice mode %d",
        openh264enc->slice_mode);
    slice_mode = SM_SINGLE_SLICE;
  }

#if OPENH264_MAJOR == 1 && OPENH264_MINOR < 6
  enc_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = slice_mode;
  enc_params.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = n_slices;
#else
  enc_params.sSpatialLayers[0].sSliceArgument.uiSliceMode = slice_mode;
  enc_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = n_slices;
#endif

  openh264enc->framerate = (1 + fps_n / fps_d);

  ret = openh264enc->encoder->InitializeExt (&enc_params);

  if (ret != cmResultSuccess) {
    GST_ERROR_OBJECT (openh264enc, "failed to initialize encoder");
    return FALSE;
  }

  openh264enc->encoder->SetOption (ENCODER_OPTION_DATAFORMAT, &video_format);

  outcaps =
      gst_caps_copy (gst_static_pad_template_get_caps
      (&gst_openh264enc_src_template));

  output_state = gst_video_encoder_set_output_state (encoder, outcaps, state);
  gst_video_codec_state_unref (output_state);

  return gst_video_encoder_negotiate (encoder);
}
Example #21
static GstFlowReturn
handle_slice (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecFrame *frame;
  const mpeg2_picture_t *picture;
  gboolean key_frame = FALSE;
  GstVideoCodecState *state;

  GST_DEBUG_OBJECT (mpeg2dec,
      "fbuf:%p display_picture:%p current_picture:%p fbuf->id:%d",
      info->display_fbuf, info->display_picture, info->current_picture,
      GPOINTER_TO_INT (info->display_fbuf->id) - 1);

  /* Note: the fbuf id is shifted by 1 to distinguish NULL values (used by
   * dummy buffers) from 'real' values */
  frame = gst_video_decoder_get_frame (GST_VIDEO_DECODER (mpeg2dec),
      GPOINTER_TO_INT (info->display_fbuf->id) - 1);
  if (!frame)
    goto no_frame;
  picture = info->display_picture;
  key_frame = (picture->flags & PIC_MASK_CODING_TYPE) == PIC_FLAG_CODING_TYPE_I;

  GST_DEBUG_OBJECT (mpeg2dec, "picture flags: %d, type: %d, keyframe: %d",
      picture->flags, picture->flags & PIC_MASK_CODING_TYPE, key_frame);

  if (key_frame) {
    mpeg2_skip (mpeg2dec->decoder, 0);
  }

  if (mpeg2dec->discont_state == MPEG2DEC_DISC_NEW_KEYFRAME && key_frame)
    mpeg2dec->discont_state = MPEG2DEC_DISC_NONE;

  if (picture->flags & PIC_FLAG_SKIP) {
    GST_DEBUG_OBJECT (mpeg2dec, "dropping buffer because of skip flag");
    ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (mpeg2dec), frame);
    mpeg2_skip (mpeg2dec->decoder, 1);
    return ret;
  }

  if (mpeg2dec->discont_state != MPEG2DEC_DISC_NONE) {
    GST_DEBUG_OBJECT (mpeg2dec, "dropping buffer, discont state %d",
        mpeg2dec->discont_state);
    ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (mpeg2dec), frame);
    return ret;
  }

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (mpeg2dec));

  /* do cropping if the target region is smaller than the input one */
  if (mpeg2dec->need_cropping && !mpeg2dec->has_cropping) {
    GstVideoFrame *vframe;

    if (gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (mpeg2dec),
            frame) < 0) {
      GST_DEBUG_OBJECT (mpeg2dec, "dropping buffer crop, too late");
      ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (mpeg2dec), frame);
      goto beach;
    }

    GST_DEBUG_OBJECT (mpeg2dec, "cropping buffer");
    vframe = gst_mpeg2dec_get_buffer (mpeg2dec, frame->system_frame_number);
    g_assert (vframe != NULL);
    ret = gst_mpeg2dec_crop_buffer (mpeg2dec, frame, vframe);
  }

  ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (mpeg2dec), frame);

beach:
  gst_video_codec_state_unref (state);
  return ret;

no_frame:
  {
    GST_DEBUG ("display buffer does not have a valid frame");
    return GST_FLOW_OK;
  }
}
Example #22
static gboolean
gst_mfc_dec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstMFCDec *self = GST_MFC_DEC (decoder);
  GstStructure *s;

  GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);

  if (self->input_state
      && gst_caps_can_intersect (self->input_state->caps, state->caps)) {
    GST_DEBUG_OBJECT (self, "Compatible caps");
    goto done;
  }

  s = gst_caps_get_structure (state->caps, 0);

  if (self->context) {
    mfc_dec_destroy (self->context);
    self->context = NULL;
  }
  self->initialized = FALSE;

  if (gst_structure_has_name (s, "video/x-h264")) {
    self->context = mfc_dec_create (CODEC_TYPE_H264);
    if (!self->context) {
      GST_ELEMENT_ERROR (self, LIBRARY, INIT,
          ("Failed to initialize MFC decoder context"), (NULL));
      return FALSE;
    }
  } else if (gst_structure_has_name (s, "video/mpeg")) {
    gint mpegversion;

    if (!gst_structure_get_int (s, "mpegversion", &mpegversion))
      return FALSE;
    if (mpegversion != 1 && mpegversion != 2 && mpegversion != 4)
      return FALSE;

    if (mpegversion == 1 || mpegversion == 2) {
      self->context = mfc_dec_create (CODEC_TYPE_MPEG2);
    } else {
      self->context = mfc_dec_create (CODEC_TYPE_MPEG4);
    }

    if (!self->context) {
      GST_ELEMENT_ERROR (self, LIBRARY, INIT,
          ("Failed to initialize MFC decoder context"), (NULL));
      return FALSE;
    }
  } else if (gst_structure_has_name (s, "video/x-h263")) {
    self->context = mfc_dec_create (CODEC_TYPE_H263);
    if (!self->context) {
      GST_ELEMENT_ERROR (self, LIBRARY, INIT,
          ("Failed to initialize MFC decoder context"), (NULL));
      return FALSE;
    }
  } else {
    g_return_val_if_reached (FALSE);
  }

  if (mfc_dec_init_input (self->context, 1) < 0) {
    GST_ELEMENT_ERROR (self, LIBRARY, INIT,
        ("Failed to initialize MFC decoder context input"), (NULL));
    return FALSE;
  }

  gst_buffer_replace (&self->codec_data, state->codec_data);

done:
  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = gst_video_codec_state_ref (state);

  return TRUE;
}
Example #23
static GstFlowReturn
gst_rsvg_decode_image (GstRsvgDec * rsvg, GstBuffer * buffer,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (rsvg);
  GstFlowReturn ret = GST_FLOW_OK;
  cairo_t *cr;
  cairo_surface_t *surface;
  RsvgHandle *handle;
  GError *error = NULL;
  RsvgDimensionData dimension;
  gdouble scalex, scaley;
  GstMapInfo minfo;
  GstVideoFrame vframe;
  GstVideoCodecState *output_state;

  GST_LOG_OBJECT (rsvg, "parsing svg");

  if (!gst_buffer_map (buffer, &minfo, GST_MAP_READ)) {
    GST_ERROR_OBJECT (rsvg, "Failed to get SVG image");
    return GST_FLOW_ERROR;
  }
  handle = rsvg_handle_new_from_data (minfo.data, minfo.size, &error);
  if (!handle) {
    GST_ERROR_OBJECT (rsvg, "Failed to parse SVG image: %s", error->message);
    g_error_free (error);
    gst_buffer_unmap (buffer, &minfo);
    return GST_FLOW_ERROR;
  }

  rsvg_handle_get_dimensions (handle, &dimension);

  output_state = gst_video_decoder_get_output_state (decoder);
  if ((output_state == NULL)
      || GST_VIDEO_INFO_WIDTH (&output_state->info) != dimension.width
      || GST_VIDEO_INFO_HEIGHT (&output_state->info) != dimension.height) {

    /* Create the output state */
    if (output_state)
      gst_video_codec_state_unref (output_state);
    output_state =
        gst_video_decoder_set_output_state (decoder, GST_RSVG_VIDEO_FORMAT,
        dimension.width, dimension.height, rsvg->input_state);
  }

  ret = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (ret != GST_FLOW_OK) {
    g_object_unref (handle);
    gst_video_codec_state_unref (output_state);
    gst_buffer_unmap (buffer, &minfo);
    GST_ERROR_OBJECT (rsvg, "Buffer allocation failed %s",
        gst_flow_get_name (ret));
    return ret;
  }

  GST_LOG_OBJECT (rsvg, "render image at %d x %d",
      GST_VIDEO_INFO_WIDTH (&output_state->info),
      GST_VIDEO_INFO_HEIGHT (&output_state->info));


  if (!gst_video_frame_map (&vframe,
          &output_state->info, frame->output_buffer, GST_MAP_READWRITE)) {
    GST_ERROR_OBJECT (rsvg, "Failed to map output frame");
    g_object_unref (handle);
    gst_video_codec_state_unref (output_state);
    gst_buffer_unmap (buffer, &minfo);
    return GST_FLOW_ERROR;
  }
  surface =
      cairo_image_surface_create_for_data (GST_VIDEO_FRAME_PLANE_DATA (&vframe,
          0), CAIRO_FORMAT_ARGB32, GST_VIDEO_FRAME_WIDTH (&vframe),
      GST_VIDEO_FRAME_HEIGHT (&vframe), GST_VIDEO_FRAME_PLANE_STRIDE (&vframe,
          0));

  cr = cairo_create (surface);
  cairo_set_operator (cr, CAIRO_OPERATOR_CLEAR);
  cairo_set_source_rgba (cr, 1.0, 1.0, 1.0, 0.0);
  cairo_paint (cr);
  cairo_set_operator (cr, CAIRO_OPERATOR_OVER);
  cairo_set_source_rgba (cr, 0.0, 0.0, 0.0, 1.0);

  scalex = scaley = 1.0;
  if (GST_VIDEO_INFO_WIDTH (&output_state->info) != dimension.width) {
    scalex =
        ((gdouble) GST_VIDEO_INFO_WIDTH (&output_state->info)) /
        ((gdouble) dimension.width);
  }
  if (GST_VIDEO_INFO_HEIGHT (&output_state->info) != dimension.height) {
    scaley =
        ((gdouble) GST_VIDEO_INFO_HEIGHT (&output_state->info)) /
        ((gdouble) dimension.height);
  }
  cairo_scale (cr, scalex, scaley);
  rsvg_handle_render_cairo (handle, cr);

  g_object_unref (handle);
  cairo_destroy (cr);
  cairo_surface_destroy (surface);

  /* Now unpremultiply Cairo's ARGB to match GStreamer's */
  gst_rsvg_decode_unpremultiply (GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0),
      GST_VIDEO_FRAME_WIDTH (&vframe), GST_VIDEO_FRAME_HEIGHT (&vframe));

  gst_video_codec_state_unref (output_state);
  gst_buffer_unmap (buffer, &minfo);
  gst_video_frame_unmap (&vframe);

  return ret;
}
Example #24
static GstFlowReturn
gst_mfc_dec_dequeue_output (GstMFCDec * self)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint mfc_ret;
  GstVideoCodecFrame *frame = NULL;
  GstBuffer *outbuf = NULL;
  struct mfc_buffer *mfc_outbuf = NULL;
  gint width, height;
  gint crop_left, crop_top, crop_width, crop_height;
  gint src_ystride, src_uvstride;
  GstVideoCodecState *state = NULL;
  gint64 deadline;
  struct timeval timestamp;

  if (!self->initialized) {
    GST_DEBUG_OBJECT (self, "Initializing decoder");
    if ((mfc_ret = mfc_dec_init_output (self->context, 1)) < 0)
      goto initialize_error;
    self->initialized = TRUE;
  }

  while ((mfc_ret = mfc_dec_output_available (self->context)) > 0) {
    GST_DEBUG_OBJECT (self, "Dequeueing output");

    mfc_dec_get_output_size (self->context, &width, &height);
    mfc_dec_get_output_stride (self->context, &src_ystride, &src_uvstride);
    mfc_dec_get_crop_size (self->context, &crop_left, &crop_top, &crop_width,
        &crop_height);

    GST_DEBUG_OBJECT (self, "Have output: width %d, height %d, "
        "Y stride %d, UV stride %d, "
        "crop_left %d, crop_top %d, "
        "crop_width %d, crop_height %d", width, height, src_ystride,
        src_uvstride, crop_left, crop_top, crop_width, crop_height);

    state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));

    if (!state || self->width != width || self->height != height ||
        self->src_stride[0] != src_ystride
        || self->src_stride[1] != src_uvstride
        || self->crop_left != crop_left || self->crop_top != crop_top
        || self->crop_width != crop_width || self->crop_height != crop_height) {
      self->width = width;
      self->height = height;
      self->crop_left = crop_left;
      self->crop_top = crop_top;
      self->crop_width = crop_width;
      self->crop_height = crop_height;
      self->src_stride[0] = src_ystride;
      self->src_stride[1] = src_uvstride;
      self->src_stride[2] = 0;

      if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self)))
        goto negotiate_error;

      if (state)
        gst_video_codec_state_unref (state);
      state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));
    }

    if ((mfc_ret =
            mfc_dec_dequeue_output (self->context, &mfc_outbuf,
                &timestamp)) < 0) {
      if (mfc_ret == -2) {
        GST_DEBUG_OBJECT (self, "Timeout dequeueing output, trying again");
        mfc_ret =
            mfc_dec_dequeue_output (self->context, &mfc_outbuf, &timestamp);
      }

      if (mfc_ret < 0)
        goto dequeue_error;
    }

    g_assert (mfc_outbuf != NULL);

    GST_DEBUG_OBJECT (self, "Got output buffer with ID %ld", timestamp.tv_sec);

    frame = NULL;
    if (timestamp.tv_sec != -1)
      frame =
          gst_video_decoder_get_frame (GST_VIDEO_DECODER (self),
          timestamp.tv_sec);

    if (frame) {
      deadline =
          gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (self),
          frame);
      if (deadline < 0) {
        GST_LOG_OBJECT (self,
            "Dropping too late frame: deadline %" G_GINT64_FORMAT, deadline);
        ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
        frame = NULL;
        outbuf = NULL;
        goto done;
      }

      ret =
          gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (self),
          frame);

      if (ret != GST_FLOW_OK)
        goto alloc_error;

      outbuf = frame->output_buffer;
    } else {
      GST_WARNING_OBJECT (self, "Didn't find a frame for ID %ld",
          timestamp.tv_sec);

      outbuf =
          gst_video_decoder_allocate_output_buffer (GST_VIDEO_DECODER (self));

      if (!outbuf) {
        ret = GST_FLOW_ERROR;
        goto alloc_error;
      }
    }

    ret = gst_mfc_dec_fill_outbuf (self, outbuf, mfc_outbuf, state);
    if (ret != GST_FLOW_OK)
      goto fill_error;

    if (frame) {
      ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
      frame = NULL;
      outbuf = NULL;
    } else {
      ret = gst_pad_push (GST_VIDEO_DECODER_SRC_PAD (self), outbuf);
      outbuf = NULL;
    }

    if (ret != GST_FLOW_OK)
      GST_INFO_OBJECT (self, "Pushing frame returned: %s",
          gst_flow_get_name (ret));

  done:
    if (mfc_outbuf) {
      if ((mfc_ret = mfc_dec_enqueue_output (self->context, mfc_outbuf)) < 0)
        goto enqueue_error;
    }

    if (!frame && outbuf)
      gst_buffer_unref (outbuf);
    if (frame)
      gst_video_codec_frame_unref (frame);
    if (state)
      gst_video_codec_state_unref (state);

    frame = NULL;
    outbuf = NULL;

    if (ret != GST_FLOW_OK)
      break;
  }

  return ret;

initialize_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, INIT, ("Failed to initialize output"),
        ("mfc_dec_init_output: %d", mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }

negotiate_error:
  {
    GST_ELEMENT_ERROR (self, CORE, NEGOTIATION, ("Failed to negotiate"),
        (NULL));
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }

dequeue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to dequeue output buffer"), ("mfc_dec_dequeue_output: %d",
            mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }

alloc_error:
  {
    GST_ELEMENT_ERROR (self, CORE, FAILED, ("Failed to allocate output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

fill_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, ("Failed to fill output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

enqueue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to enqueue output buffer"), ("mfc_dec_enqueue_output: %d",
            mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
Example #25
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  gboolean output_textures = FALSE;

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently configured
     * caps are still accepted by downstream we keep them so we don't have to
     * destroy and recreate the session.
     */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
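  /* the freshly created output state has no caps set yet; build them from
   * the video info so that caps features can be attached below */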
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available
     */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
Example #26
static gboolean
gst_amc_video_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstAmcVideoEnc *self;
  GstAmcFormat *format = NULL;
  GstCaps *allowed_caps = NULL;
  gboolean is_format_change = FALSE;
  gboolean needs_disable = FALSE;
  gchar *format_string;
  gboolean r = FALSE;
  GError *err = NULL;

  self = GST_AMC_VIDEO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Setting new caps %" GST_PTR_FORMAT, state->caps);

  /* Check if the caps change is a real format change or if only irrelevant
   * parts of the caps have changed or nothing at all.
   */
  is_format_change |= self->color_format_info.width != state->info.width;
  is_format_change |= self->color_format_info.height != state->info.height;
  needs_disable = self->started;

  /* If the component is already started and a real format change happens
   * we have to restart the component. If no real format change
   * happened we can just exit here.
   */
  if (needs_disable && !is_format_change) {

    /* Framerate or something minor changed */
    if (self->input_state)
      gst_video_codec_state_unref (self->input_state);
    self->input_state = gst_video_codec_state_ref (state);
    GST_DEBUG_OBJECT (self,
        "Already running and caps did not change the format");
    return TRUE;
  }

  if (needs_disable && is_format_change) {
    gst_amc_video_enc_drain (self);
    GST_VIDEO_ENCODER_STREAM_UNLOCK (self);
    gst_amc_video_enc_stop (GST_VIDEO_ENCODER (self));
    GST_VIDEO_ENCODER_STREAM_LOCK (self);
    gst_amc_video_enc_close (GST_VIDEO_ENCODER (self));
    if (!gst_amc_video_enc_open (GST_VIDEO_ENCODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to open codec again");
      return FALSE;
    }

    if (!gst_amc_video_enc_start (GST_VIDEO_ENCODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to start codec again");
      return FALSE;
    }
  }
  /* srcpad task is not running at this point */
  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = NULL;

  GST_DEBUG_OBJECT (self, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (self, "... but no peer, using template caps");
    allowed_caps =
        gst_pad_get_pad_template_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  }
  GST_DEBUG_OBJECT (self, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  allowed_caps = gst_caps_truncate (allowed_caps);

  format = create_amc_format (self, state, allowed_caps);
  if (!format)
    goto quit;

  format_string = gst_amc_format_to_string (format, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (self, err);
  GST_DEBUG_OBJECT (self, "Configuring codec with format: %s",
      GST_STR_NULL (format_string));
  g_free (format_string);

  if (!gst_amc_codec_configure (self->codec, format, 1, &err)) {
    GST_ERROR_OBJECT (self, "Failed to configure codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    goto quit;
  }

  if (!gst_amc_codec_start (self->codec, &err)) {
    GST_ERROR_OBJECT (self, "Failed to start codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    goto quit;
  }

  self->amc_format = format;
  format = NULL;

  self->input_state = gst_video_codec_state_ref (state);

  self->started = TRUE;

  /* Start the srcpad loop again */
  self->flushing = FALSE;
  self->downstream_flow_ret = GST_FLOW_OK;
  gst_pad_start_task (GST_VIDEO_ENCODER_SRC_PAD (self),
      (GstTaskFunction) gst_amc_video_enc_loop, encoder, NULL);

  r = TRUE;

quit:
  if (allowed_caps)
    gst_object_unref (allowed_caps);

  if (format)
    gst_amc_format_free (format);

  return r;
}
Example #27
static gboolean
gst_libde265_dec_set_format (GstVideoDecoder * decoder,
    GstVideoCodecState * state)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (decoder);

  if (dec->input_state != NULL) {
    gst_video_codec_state_unref (dec->input_state);
  }
  dec->input_state = state;
  if (state != NULL) {
    gst_video_codec_state_ref (state);
  }
  if (state != NULL && state->caps != NULL) {
    GstStructure *str;
    const GValue *value;
    str = gst_caps_get_structure (state->caps, 0);
    if ((value = gst_structure_get_value (str, "codec_data"))) {
      GstMapInfo info;
      guint8 *data;
      gsize size;
      GstBuffer *buf;
      de265_error err;
      int more;

      buf = gst_value_get_buffer (value);
      if (!gst_buffer_map (buf, &info, GST_MAP_READ)) {
        GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
            ("Failed to map codec data"), (NULL));
        return FALSE;
      }
      data = info.data;
      size = info.size;
      free (dec->codec_data);
      dec->codec_data = malloc (size);
      g_assert (dec->codec_data != NULL);
      dec->codec_data_size = size;
      memcpy (dec->codec_data, data, size);
      if (size > 3 && (data[0] || data[1] || data[2] > 1)) {
        /* encoded in "hvcC" format (assume version 0) */
        dec->format = GST_TYPE_LIBDE265_FORMAT_PACKETIZED;
        if (size > 22) {
          int i;
          int num_param_sets;
          int pos;
          if (data[0] != 0) {
            GST_ELEMENT_WARNING (decoder, STREAM,
                DECODE, ("Unsupported extra data version %d, decoding may fail",
                    data[0]), (NULL));
          }
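          /* the low two bits of byte 21 hold lengthSizeMinusOne, i.e. the
           * size of the NAL length prefixes minus one */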
          dec->length_size = (data[21] & 3) + 1;
          num_param_sets = data[22];
          pos = 23;
          for (i = 0; i < num_param_sets; i++) {
            int j;
            int nal_count;
            if (pos + 3 > size) {
              GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
                  ("Buffer underrun in extra header (%d >= %" G_GSIZE_FORMAT
                      ")", pos + 3, size), (NULL));
              return FALSE;
            }
            /* ignore flags + NAL type (1 byte) */
            nal_count = data[pos + 1] << 8 | data[pos + 2];
            pos += 3;
            for (j = 0; j < nal_count; j++) {
              int nal_size;
              if (pos + 2 > size) {
                GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
                    ("Buffer underrun in extra nal header (%d >= %"
                        G_GSIZE_FORMAT ")", pos + 2, size), (NULL));
                return FALSE;
              }
              nal_size = data[pos] << 8 | data[pos + 1];
              if (pos + 2 + nal_size > size) {
                GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
                    ("Buffer underrun in extra nal (%d >= %" G_GSIZE_FORMAT ")",
                        pos + 2 + nal_size, size), (NULL));
                return FALSE;
              }
              err =
                  de265_push_NAL (dec->ctx, data + pos + 2, nal_size, 0, NULL);
              if (!de265_isOK (err)) {
                GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
                    ("Failed to push data: %s (%d)", de265_get_error_text (err),
                        err), (NULL));
                return FALSE;
              }
              pos += 2 + nal_size;
            }
          }
        }
        GST_DEBUG ("Assuming packetized data (%d bytes length)",
            dec->length_size);
      } else {
        dec->format = GST_TYPE_LIBDE265_FORMAT_BYTESTREAM;
        GST_DEBUG_OBJECT (dec, "Assuming non-packetized data");
        err = de265_push_data (dec->ctx, data, size, 0, NULL);
        if (!de265_isOK (err)) {
          gst_buffer_unmap (buf, &info);
          GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
              ("Failed to push codec data: %s (code=%d)",
                  de265_get_error_text (err), err), (NULL));
          return FALSE;
        }
      }
      gst_buffer_unmap (buf, &info);
      de265_push_end_of_NAL (dec->ctx);
      do {
        err = de265_decode (dec->ctx, &more);
        switch (err) {
          case DE265_OK:
            break;

          case DE265_ERROR_IMAGE_BUFFER_FULL:
          case DE265_ERROR_WAITING_FOR_INPUT_DATA:
            /* not really an error */
            more = 0;
            break;

          default:
            if (!de265_isOK (err)) {
              GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
                  ("Failed to decode codec data: %s (code=%d)",
                      de265_get_error_text (err), err), (NULL));
              return FALSE;
            }
        }
      } while (more);
    } else if ((value = gst_structure_get_value (str, "stream-format"))) {
      const gchar *str = g_value_get_string (value);
      if (strcmp (str, "byte-stream") == 0) {
        dec->format = GST_TYPE_LIBDE265_FORMAT_BYTESTREAM;
        GST_DEBUG_OBJECT (dec, "Assuming raw byte-stream");
      }
    }
  }

  return TRUE;
}
Example #28
static GstFlowReturn
gst_amc_video_enc_handle_output_frame (GstAmcVideoEnc * self,
    GstAmcBuffer * buf, const GstAmcBufferInfo * buffer_info,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstVideoEncoder *encoder = GST_VIDEO_ENCODER_CAST (self);

  /* The BUFFER_FLAG_CODEC_CONFIG logic is borrowed from
   * gst-omx. see *_handle_output_frame in
   * gstomxvideoenc.c and gstomxh264enc.c */
  if ((buffer_info->flags & BUFFER_FLAG_CODEC_CONFIG)
      && buffer_info->size > 0) {
    GstStructure *s;
    GstVideoCodecState *state;

    state = gst_video_encoder_get_output_state (encoder);
    s = gst_caps_get_structure (state->caps, 0);
    if (!strcmp (gst_structure_get_name (s), "video/x-h264")) {
      gst_video_codec_state_unref (state);

      if (buffer_info->size > 4 &&
          GST_READ_UINT32_BE (buf->data + buffer_info->offset) == 0x00000001) {
        GList *l = NULL;
        GstBuffer *hdrs;

        GST_DEBUG_OBJECT (self, "got codecconfig in byte-stream format");

        hdrs = gst_buffer_new_and_alloc (buffer_info->size);
        gst_buffer_fill (hdrs, 0, buf->data + buffer_info->offset,
            buffer_info->size);

        l = g_list_append (l, hdrs);
        gst_video_encoder_set_headers (encoder, l);
      }
    } else {
      GstBuffer *codec_data;

      GST_DEBUG_OBJECT (self, "Handling codec data");

      codec_data = gst_buffer_new_and_alloc (buffer_info->size);
      gst_buffer_fill (codec_data, 0, buf->data + buffer_info->offset,
          buffer_info->size);
      state->codec_data = codec_data;
      gst_video_codec_state_unref (state);

      if (!gst_video_encoder_negotiate (encoder)) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_NOT_NEGOTIATED;
      }

      return GST_FLOW_OK;
    }
  }

  if (buffer_info->size > 0) {
    GstBuffer *out_buf;
    GstPad *srcpad;

    srcpad = GST_VIDEO_ENCODER_SRC_PAD (encoder);
    out_buf =
        gst_video_encoder_allocate_output_buffer (encoder, buffer_info->size);
    gst_buffer_fill (out_buf, 0, buf->data + buffer_info->offset,
        buffer_info->size);

    GST_BUFFER_PTS (out_buf) =
        gst_util_uint64_scale (buffer_info->presentation_time_us, GST_USECOND,
        1);

    if (frame) {
      frame->output_buffer = out_buf;
      flow_ret = gst_video_encoder_finish_frame (encoder, frame);
    } else {
      /* This sometimes happens at EOS or if the input is not properly framed,
       * let's handle it gracefully by allocating a new buffer for the current
       * caps and filling it
       */

      GST_ERROR_OBJECT (self, "No corresponding frame found");
      flow_ret = gst_pad_push (srcpad, out_buf);
    }
  } else if (frame) {
    flow_ret = gst_video_encoder_finish_frame (encoder, frame);
  }

  return flow_ret;
}
Example #29
static gboolean
theora_dec_set_format (GstVideoDecoder * bdec, GstVideoCodecState * state)
{
  GstTheoraDec *dec;

  dec = GST_THEORA_DEC (bdec);

  /* Keep a copy of the input state */
  if (dec->input_state)
    gst_video_codec_state_unref (dec->input_state);
  dec->input_state = gst_video_codec_state_ref (state);

  /* FIXME : Interesting, we always accept any kind of caps ? */
  if (state->codec_data) {
    GstBuffer *buffer;
    GstMapInfo minfo;
    guint8 *data;
    guint size;
    guint offset;

    buffer = state->codec_data;
    gst_buffer_map (buffer, &minfo, GST_MAP_READ);

    offset = 0;
    size = minfo.size;
    data = (guint8 *) minfo.data;

    while (size > 2) {
      guint psize;
      GstBuffer *buf;

      psize = (data[0] << 8) | data[1];
      /* skip header */
      data += 2;
      size -= 2;
      offset += 2;

      /* make sure we don't read too much */
      psize = MIN (psize, size);

      buf = gst_buffer_copy_region (buffer, GST_BUFFER_COPY_ALL, offset, psize);

      /* first buffer is a discont buffer */
      if (offset == 2)
        GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);

      /* now feed it to the decoder; we can ignore the error */
      theora_dec_decode_buffer (dec, buf, NULL);
      gst_buffer_unref (buf);

      /* skip the data */
      size -= psize;
      data += psize;
      offset += psize;
    }

    gst_buffer_unmap (buffer, &minfo);
  }

  GST_DEBUG_OBJECT (dec, "Done");

  return TRUE;
}
Example #30
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
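Note: every example above follows the same GstVideoCodecState ownership rules. A state that is kept across calls is retained with gst_video_codec_state_ref(), and states returned by gst_video_decoder_get_output_state(), gst_video_decoder_set_output_state() and their encoder counterparts are owned by the caller and must be released with gst_video_codec_state_unref(). A minimal set_format() sketch of this pattern could look as follows; GstMyDec, GST_MY_DEC and the fixed I420 320x240 output are illustrative placeholders, not taken from any of the elements above.

static gboolean
gst_my_dec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstMyDec *self = GST_MY_DEC (decoder);   /* hypothetical element type */
  GstVideoCodecState *output_state;

  /* drop the reference to the previous input state, if any */
  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  /* keep our own reference to the new input state */
  self->input_state = gst_video_codec_state_ref (state);

  /* set_output_state() returns a reference owned by the caller ... */
  output_state = gst_video_decoder_set_output_state (decoder,
      GST_VIDEO_FORMAT_I420, 320, 240, self->input_state);
  /* ... which must be released once configuration is done */
  gst_video_codec_state_unref (output_state);

  return gst_video_decoder_negotiate (decoder);
}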