static gboolean
gst_mfxpostproc_update_src_caps (GstMfxPostproc * vpp, GstCaps * caps,
    gboolean * caps_changed_ptr)
{
  GST_INFO_OBJECT (vpp, "new src caps = %" GST_PTR_FORMAT, caps);

  if (!video_info_update (caps, &vpp->srcpad_info, caps_changed_ptr))
    return FALSE;

  if (GST_VIDEO_INFO_FORMAT (&vpp->sinkpad_info) !=
      GST_VIDEO_INFO_FORMAT (&vpp->srcpad_info))
    vpp->flags |= GST_MFX_POSTPROC_FLAG_FORMAT;

  if ((vpp->width || vpp->height) &&
      vpp->width != GST_VIDEO_INFO_WIDTH (&vpp->sinkpad_info) &&
      vpp->height != GST_VIDEO_INFO_HEIGHT (&vpp->sinkpad_info))
    vpp->flags |= GST_MFX_POSTPROC_FLAG_SIZE;

  if (vpp->fps_n && gst_util_fraction_compare(
        GST_VIDEO_INFO_FPS_N (&vpp->srcpad_info),
        GST_VIDEO_INFO_FPS_D (&vpp->srcpad_info),
        GST_VIDEO_INFO_FPS_N (&vpp->sinkpad_info),
        GST_VIDEO_INFO_FPS_D (&vpp->sinkpad_info)))
    vpp->flags |= GST_MFX_POSTPROC_FLAG_FRC;

  return TRUE;
}
Example #2
/**
 * gst_video_info_is_equal:
 * @info: a #GstVideoInfo
 * @other: a #GstVideoInfo
 *
 * Compares two #GstVideoInfo and returns whether they are equal or not
 *
 * Returns: %TRUE if @info and @other are equal, else %FALSE.
 */
gboolean
gst_video_info_is_equal (const GstVideoInfo * info, const GstVideoInfo * other)
{
  gint i;

  if (GST_VIDEO_INFO_FORMAT (info) != GST_VIDEO_INFO_FORMAT (other))
    return FALSE;
  if (GST_VIDEO_INFO_INTERLACE_MODE (info) !=
      GST_VIDEO_INFO_INTERLACE_MODE (other))
    return FALSE;
  if (GST_VIDEO_INFO_FLAGS (info) != GST_VIDEO_INFO_FLAGS (other))
    return FALSE;
  if (GST_VIDEO_INFO_WIDTH (info) != GST_VIDEO_INFO_WIDTH (other))
    return FALSE;
  if (GST_VIDEO_INFO_HEIGHT (info) != GST_VIDEO_INFO_HEIGHT (other))
    return FALSE;
  if (GST_VIDEO_INFO_SIZE (info) != GST_VIDEO_INFO_SIZE (other))
    return FALSE;
  if (GST_VIDEO_INFO_PAR_N (info) != GST_VIDEO_INFO_PAR_N (other))
    return FALSE;
  if (GST_VIDEO_INFO_PAR_D (info) != GST_VIDEO_INFO_PAR_D (other))
    return FALSE;
  if (GST_VIDEO_INFO_FPS_N (info) != GST_VIDEO_INFO_FPS_N (other))
    return FALSE;
  if (GST_VIDEO_INFO_FPS_D (info) != GST_VIDEO_INFO_FPS_D (other))
    return FALSE;
  if (!gst_video_colorimetry_is_equal (&GST_VIDEO_INFO_COLORIMETRY (info),
          &GST_VIDEO_INFO_COLORIMETRY (other)))
    return FALSE;
  if (GST_VIDEO_INFO_CHROMA_SITE (info) != GST_VIDEO_INFO_CHROMA_SITE (other))
    return FALSE;
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (info) !=
      GST_VIDEO_INFO_MULTIVIEW_MODE (other))
    return FALSE;
  if (GST_VIDEO_INFO_MULTIVIEW_FLAGS (info) !=
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (other))
    return FALSE;
  if (GST_VIDEO_INFO_VIEWS (info) != GST_VIDEO_INFO_VIEWS (other))
    return FALSE;

  for (i = 0; i < info->finfo->n_planes; i++) {
    if (info->stride[i] != other->stride[i])
      return FALSE;
    if (info->offset[i] != other->offset[i])
      return FALSE;
  }

  return TRUE;
}
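/* A minimal usage sketch (caller code assumed, not part of GStreamer):
 * parse two caps into GstVideoInfo with gst_video_info_from_caps() and
 * use gst_video_info_is_equal() to decide whether renegotiation is needed.
 * Requires <gst/video/video.h>. */
static gboolean
caps_require_renegotiation (GstCaps * old_caps, GstCaps * new_caps)
{
  GstVideoInfo old_info, new_info;

  if (!gst_video_info_from_caps (&old_info, old_caps) ||
      !gst_video_info_from_caps (&new_info, new_caps))
    return TRUE;                /* unparsable caps: renegotiate to be safe */

  return !gst_video_info_is_equal (&old_info, &new_info);
}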
Example #3
bool gst_vlc_set_vout_fmt( GstVideoInfo *p_info, GstVideoAlignment *p_align,
        GstCaps *p_caps, decoder_t *p_dec )
{
    es_format_t *p_outfmt = &p_dec->fmt_out;
    video_format_t *p_voutfmt = &p_dec->fmt_out.video;
    GstStructure *p_str = gst_caps_get_structure( p_caps, 0 );
    vlc_fourcc_t i_chroma;
    int i_padded_width, i_padded_height;

    i_chroma = p_outfmt->i_codec = vlc_fourcc_GetCodecFromString(
            VIDEO_ES,
            gst_structure_get_string( p_str, "format" ) );
    if( !i_chroma )
    {
        msg_Err( p_dec, "video chroma type not supported" );
        return false;
    }

    i_padded_width = GST_VIDEO_INFO_WIDTH( p_info ) + p_align->padding_left +
        p_align->padding_right;
    i_padded_height = GST_VIDEO_INFO_HEIGHT( p_info ) + p_align->padding_top +
        p_align->padding_bottom;

    video_format_Setup( p_voutfmt, i_chroma, i_padded_width, i_padded_height,
            GST_VIDEO_INFO_WIDTH( p_info ), GST_VIDEO_INFO_HEIGHT( p_info ),
            GST_VIDEO_INFO_PAR_N( p_info ), GST_VIDEO_INFO_PAR_D( p_info ));
    p_voutfmt->i_x_offset = p_align->padding_left;
    p_voutfmt->i_y_offset = p_align->padding_top;

    p_voutfmt->i_frame_rate = GST_VIDEO_INFO_FPS_N( p_info );
    p_voutfmt->i_frame_rate_base = GST_VIDEO_INFO_FPS_D( p_info );

    return true;
}
Example #4
static inline GstBuffer *
gst_y4m_encode_get_stream_header (GstY4mEncode * filter, gboolean tff)
{
  gpointer header;
  GstBuffer *buf;
  gchar interlaced;
  gsize len;

  if (GST_VIDEO_INFO_IS_INTERLACED (&filter->info)) {
    if (tff)
      interlaced = 't';
    else
      interlaced = 'b';
  } else {
    interlaced = 'p';
  }

  header = g_strdup_printf ("YUV4MPEG2 C%s W%d H%d I%c F%d:%d A%d:%d\n",
      filter->colorspace, GST_VIDEO_INFO_WIDTH (&filter->info),
      GST_VIDEO_INFO_HEIGHT (&filter->info), interlaced,
      GST_VIDEO_INFO_FPS_N (&filter->info),
      GST_VIDEO_INFO_FPS_D (&filter->info),
      GST_VIDEO_INFO_PAR_N (&filter->info),
      GST_VIDEO_INFO_PAR_D (&filter->info));
  len = strlen (header);

  buf = gst_buffer_new ();
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (0, header, len, 0, len, header, g_free));

  return buf;
}
Example #5
gboolean
__gst_video_rawvideo_convert (GstVideoCodecState * state,
    GstFormat src_format, gint64 src_value,
    GstFormat * dest_format, gint64 * dest_value)
{
  gboolean res = FALSE;
  guint vidsize;
  guint fps_n, fps_d;

  g_return_val_if_fail (dest_format != NULL, FALSE);
  g_return_val_if_fail (dest_value != NULL, FALSE);

  if (src_format == *dest_format || src_value == 0 || src_value == -1) {
    *dest_value = src_value;
    return TRUE;
  }

  vidsize = GST_VIDEO_INFO_SIZE (&state->info);
  fps_n = GST_VIDEO_INFO_FPS_N (&state->info);
  fps_d = GST_VIDEO_INFO_FPS_D (&state->info);

  if (src_format == GST_FORMAT_BYTES &&
      *dest_format == GST_FORMAT_DEFAULT && vidsize) {
    /* convert bytes to frames */
    *dest_value = gst_util_uint64_scale_int (src_value, 1, vidsize);
    res = TRUE;
  } else if (src_format == GST_FORMAT_DEFAULT &&
      *dest_format == GST_FORMAT_BYTES && vidsize) {
    /* convert frames to bytes */
    *dest_value = src_value * vidsize;
    res = TRUE;
  } else if (src_format == GST_FORMAT_DEFAULT &&
      *dest_format == GST_FORMAT_TIME && fps_n) {
    /* convert frames to time */
    *dest_value = gst_util_uint64_scale (src_value, GST_SECOND * fps_d, fps_n);
    res = TRUE;
  } else if (src_format == GST_FORMAT_TIME &&
      *dest_format == GST_FORMAT_DEFAULT && fps_d) {
    /* convert time to frames */
    *dest_value = gst_util_uint64_scale (src_value, fps_n, GST_SECOND * fps_d);
    res = TRUE;
  } else if (src_format == GST_FORMAT_TIME &&
      *dest_format == GST_FORMAT_BYTES && fps_d && vidsize) {
    /* convert time to bytes */
    *dest_value = gst_util_uint64_scale (src_value,
        fps_n * (guint64) vidsize, GST_SECOND * fps_d);
    res = TRUE;
  } else if (src_format == GST_FORMAT_BYTES &&
      *dest_format == GST_FORMAT_TIME && fps_n && vidsize) {
    /* convert bytes to time */
    *dest_value = gst_util_uint64_scale (src_value,
        GST_SECOND * fps_d, fps_n * (guint64) vidsize);
    res = TRUE;
  }

  return res;
}
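/* A minimal sketch of the frame/time scaling used above (the helper name is
 * hypothetical): with fps = 30000/1001, frame 300 maps to
 * 300 * GST_SECOND * 1001 / 30000, i.e. 10.01 seconds. */
static GstClockTime
frames_to_time (guint64 n_frames, gint fps_n, gint fps_d)
{
  if (fps_n <= 0 || fps_d <= 0)
    return GST_CLOCK_TIME_NONE; /* unknown framerate: no mapping */

  return gst_util_uint64_scale (n_frames, GST_SECOND * fps_d, fps_n);
}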
Example #6
void
gst_video_info_change_format (GstVideoInfo * vip, GstVideoFormat format,
    guint width, guint height)
{
  GstVideoInfo vi = *vip;

  gst_video_info_set_format (vip, format, width, height);

  GST_VIDEO_INFO_INTERLACE_MODE (vip) = GST_VIDEO_INFO_INTERLACE_MODE (&vi);
  GST_VIDEO_INFO_FLAGS (vip) = GST_VIDEO_INFO_FLAGS (&vi);
  GST_VIDEO_INFO_VIEWS (vip) = GST_VIDEO_INFO_VIEWS (&vi);
  GST_VIDEO_INFO_PAR_N (vip) = GST_VIDEO_INFO_PAR_N (&vi);
  GST_VIDEO_INFO_PAR_D (vip) = GST_VIDEO_INFO_PAR_D (&vi);
  GST_VIDEO_INFO_FPS_N (vip) = GST_VIDEO_INFO_FPS_N (&vi);
  GST_VIDEO_INFO_FPS_D (vip) = GST_VIDEO_INFO_FPS_D (&vi);
  GST_VIDEO_INFO_MULTIVIEW_MODE (vip) = GST_VIDEO_INFO_MULTIVIEW_MODE (&vi);
  GST_VIDEO_INFO_MULTIVIEW_FLAGS (vip) = GST_VIDEO_INFO_MULTIVIEW_FLAGS (&vi);
}
Example #7
static gboolean
theora_enc_set_format (GstVideoEncoder * benc, GstVideoCodecState * state)
{
    GstTheoraEnc *enc = GST_THEORA_ENC (benc);
    GstVideoInfo *info = &state->info;

    enc->width = GST_VIDEO_INFO_WIDTH (info);
    enc->height = GST_VIDEO_INFO_HEIGHT (info);

    th_info_clear (&enc->info);
    th_info_init (&enc->info);
    /* Theora has a divisible-by-sixteen restriction for the encoded video size but
     * we can define a picture area using pic_width/pic_height */
    enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
    enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
    enc->info.pic_width = enc->width;
    enc->info.pic_height = enc->height;
    switch (GST_VIDEO_INFO_FORMAT (info)) {
    case GST_VIDEO_FORMAT_I420:
        enc->info.pixel_fmt = TH_PF_420;
        break;
    case GST_VIDEO_FORMAT_Y42B:
        enc->info.pixel_fmt = TH_PF_422;
        break;
    case GST_VIDEO_FORMAT_Y444:
        enc->info.pixel_fmt = TH_PF_444;
        break;
    default:
        g_assert_not_reached ();
    }

    enc->info.fps_numerator = enc->fps_n = GST_VIDEO_INFO_FPS_N (info);
    enc->info.fps_denominator = enc->fps_d = GST_VIDEO_INFO_FPS_D (info);
    enc->info.aspect_numerator = GST_VIDEO_INFO_PAR_N (info);
    enc->info.aspect_denominator = GST_VIDEO_INFO_PAR_D (info);

    enc->info.colorspace = TH_CS_UNSPECIFIED;

    /* Save input state */
    if (enc->input_state)
        gst_video_codec_state_unref (enc->input_state);
    enc->input_state = gst_video_codec_state_ref (state);

    /* as done in theora */
    enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
    GST_DEBUG_OBJECT (enc,
                      "keyframe_frequency_force is %d, granule shift is %d",
                      enc->keyframe_force, enc->info.keyframe_granule_shift);

    theora_enc_reset (enc);
    enc->initialised = TRUE;

    return TRUE;
}
Example #8
static void
gst_kms_sink_get_times (GstBaseSink * bsink, GstBuffer * buf,
    GstClockTime * start, GstClockTime * end)
{
  GstKMSSink *self;

  self = GST_KMS_SINK (bsink);

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    *start = GST_BUFFER_TIMESTAMP (buf);
    if (GST_BUFFER_DURATION_IS_VALID (buf))
      *end = *start + GST_BUFFER_DURATION (buf);
    else {
      if (GST_VIDEO_INFO_FPS_N (&self->vinfo) > 0) {
        *end = *start +
            gst_util_uint64_scale_int (GST_SECOND,
            GST_VIDEO_INFO_FPS_D (&self->vinfo),
            GST_VIDEO_INFO_FPS_N (&self->vinfo));
      }
    }
  }
}
Example #9
static void
gst_glimage_sink_get_times (GstBaseSink * bsink, GstBuffer * buf,
    GstClockTime * start, GstClockTime * end)
{
  GstGLImageSink *glimagesink;

  glimagesink = GST_GLIMAGE_SINK (bsink);

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    *start = GST_BUFFER_TIMESTAMP (buf);
    if (GST_BUFFER_DURATION_IS_VALID (buf))
      *end = *start + GST_BUFFER_DURATION (buf);
    else {
      if (GST_VIDEO_INFO_FPS_N (&glimagesink->info) > 0) {
        *end = *start +
            gst_util_uint64_scale_int (GST_SECOND,
            GST_VIDEO_INFO_FPS_D (&glimagesink->info),
            GST_VIDEO_INFO_FPS_N (&glimagesink->info));
      }
    }
  }
}
Example #10
static gboolean
gst_pngdec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstPngDec *pngdec = (GstPngDec *) decoder;
  GstVideoInfo *info = &state->info;

  if (pngdec->input_state)
    gst_video_codec_state_unref (pngdec->input_state);
  pngdec->input_state = gst_video_codec_state_ref (state);

  if (GST_VIDEO_INFO_FPS_N (info) != 1 && GST_VIDEO_INFO_FPS_D (info) != 1)
    gst_video_decoder_set_packetized (decoder, TRUE);
  else
    gst_video_decoder_set_packetized (decoder, FALSE);

  /* We'll set format later on */

  return TRUE;
}
Example #11
static gboolean
gst_jpeg_dec_set_format (GstVideoDecoder * dec, GstVideoCodecState * state)
{
  GstJpegDec *jpeg = GST_JPEG_DEC (dec);
  GstVideoInfo *info = &state->info;

  /* FIXME : previously jpegdec would handle input as packetized
   * if the framerate was present. Here we consider it packetized if
   * the fps is != 1/1 */
  if (GST_VIDEO_INFO_FPS_N (info) != 1 && GST_VIDEO_INFO_FPS_D (info) != 1)
    gst_video_decoder_set_packetized (dec, TRUE);
  else
    gst_video_decoder_set_packetized (dec, FALSE);

  if (jpeg->input_state)
    gst_video_codec_state_unref (jpeg->input_state);
  jpeg->input_state = gst_video_codec_state_ref (state);

  return TRUE;
}
Example #12
static void
gst_pvrvideosink_get_times (GstBaseSink * bsink, GstBuffer * buf,
    GstClockTime * start, GstClockTime * end)
{
  GstPVRVideoSink *pvrvideosink;

  pvrvideosink = GST_PVRVIDEOSINK (bsink);

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    *start = GST_BUFFER_TIMESTAMP (buf);
    if (GST_BUFFER_DURATION_IS_VALID (buf)) {
      *end = *start + GST_BUFFER_DURATION (buf);
    } else {
      gint fps_n, fps_d;
      fps_n = GST_VIDEO_INFO_FPS_N (&pvrvideosink->info);
      fps_d = GST_VIDEO_INFO_FPS_D (&pvrvideosink->info);
      if (fps_n > 0) {
        *end = *start + gst_util_uint64_scale_int (GST_SECOND, fps_d, fps_n);
      }
    }
  }
}
Example #13
static gboolean
gst_smpte_setcaps (GstPad * pad, GstCaps * caps)
{
  GstSMPTE *smpte;
  gboolean ret;
  GstVideoInfo vinfo;

  smpte = GST_SMPTE (GST_PAD_PARENT (pad));

  gst_video_info_init (&vinfo);
  if (!gst_video_info_from_caps (&vinfo, caps))
    return FALSE;

  smpte->width = GST_VIDEO_INFO_WIDTH (&vinfo);
  smpte->height = GST_VIDEO_INFO_HEIGHT (&vinfo);
  smpte->fps_num = GST_VIDEO_INFO_FPS_N (&vinfo);
  smpte->fps_denom = GST_VIDEO_INFO_FPS_D (&vinfo);

  /* figure out the duration in frames */
  smpte->end_position = gst_util_uint64_scale (smpte->duration,
      smpte->fps_num, GST_SECOND * smpte->fps_denom);

  GST_DEBUG_OBJECT (smpte, "duration: %d frames", smpte->end_position);

  ret =
      gst_smpte_update_mask (smpte, smpte->type, smpte->invert, smpte->depth,
      smpte->width, smpte->height);

  if (pad == smpte->sinkpad1) {
    GST_DEBUG_OBJECT (smpte, "setting pad1 info");
    smpte->vinfo1 = vinfo;
  } else {
    GST_DEBUG_OBJECT (smpte, "setting pad2 info");
    smpte->vinfo2 = vinfo;
  }

  return ret;
}
Example #14
static GstFlowReturn
gst_inter_video_src_create (GstBaseSrc * src, guint64 offset, guint size,
    GstBuffer ** buf)
{
  GstInterVideoSrc *intervideosrc = GST_INTER_VIDEO_SRC (src);
  GstBuffer *buffer;

  GST_DEBUG_OBJECT (intervideosrc, "create");

  buffer = NULL;

  g_mutex_lock (intervideosrc->surface->mutex);
  if (intervideosrc->surface->video_buffer) {
    buffer = gst_buffer_ref (intervideosrc->surface->video_buffer);
    intervideosrc->surface->video_buffer_count++;
    if (intervideosrc->surface->video_buffer_count >= 30) {
      gst_buffer_unref (intervideosrc->surface->video_buffer);
      intervideosrc->surface->video_buffer = NULL;
    }
  }
  g_mutex_unlock (intervideosrc->surface->mutex);

  if (buffer == NULL) {
    GstMapInfo map;

    buffer =
        gst_buffer_new_and_alloc (GST_VIDEO_INFO_SIZE (&intervideosrc->info));

    gst_buffer_map (buffer, &map, GST_MAP_WRITE);
    memset (map.data, 16, GST_VIDEO_INFO_COMP_STRIDE (&intervideosrc->info, 0) *
        GST_VIDEO_INFO_COMP_HEIGHT (&intervideosrc->info, 0));

    memset (map.data + GST_VIDEO_INFO_COMP_OFFSET (&intervideosrc->info, 1),
        128,
        2 * GST_VIDEO_INFO_COMP_STRIDE (&intervideosrc->info, 1) *
        GST_VIDEO_INFO_COMP_HEIGHT (&intervideosrc->info, 1));
    gst_buffer_unmap (buffer, &map);
  }

  buffer = gst_buffer_make_writable (buffer);

  GST_BUFFER_TIMESTAMP (buffer) =
      gst_util_uint64_scale_int (GST_SECOND * intervideosrc->n_frames,
      GST_VIDEO_INFO_FPS_D (&intervideosrc->info),
      GST_VIDEO_INFO_FPS_N (&intervideosrc->info));
  GST_DEBUG_OBJECT (intervideosrc, "create ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
  GST_BUFFER_DURATION (buffer) =
      gst_util_uint64_scale_int (GST_SECOND * (intervideosrc->n_frames + 1),
      GST_VIDEO_INFO_FPS_D (&intervideosrc->info),
      GST_VIDEO_INFO_FPS_N (&intervideosrc->info)) -
      GST_BUFFER_TIMESTAMP (buffer);
  GST_BUFFER_OFFSET (buffer) = intervideosrc->n_frames;
  GST_BUFFER_OFFSET_END (buffer) = -1;
  GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DISCONT);
  if (intervideosrc->n_frames == 0) {
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
  }
  intervideosrc->n_frames++;

  *buf = buffer;

  return GST_FLOW_OK;
}
Example #15
static GstFlowReturn
gst_vp8_enc_pre_push (GstVideoEncoder * video_encoder,
    GstVideoCodecFrame * frame)
{
  GstVP8Enc *encoder;
  GstVPXEnc *vpx_enc;
  GstBuffer *buf;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVP8EncUserData *user_data = gst_video_codec_frame_get_user_data (frame);
  GList *l;
  gint inv_count;
  GstVideoInfo *info;

  GST_DEBUG_OBJECT (video_encoder, "pre_push");

  encoder = GST_VP8_ENC (video_encoder);
  vpx_enc = GST_VPX_ENC (encoder);

  info = &vpx_enc->input_state->info;

  g_assert (user_data != NULL);

  for (inv_count = 0, l = user_data->invisible; l; inv_count++, l = l->next) {
    buf = l->data;
    l->data = NULL;

    /* FIXME : All of this should have already been handled by base classes, no ? */
    if (l == user_data->invisible
        && GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
      GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
      encoder->keyframe_distance = 0;
    } else {
      GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
      encoder->keyframe_distance++;
    }

    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DECODE_ONLY);
    GST_BUFFER_TIMESTAMP (buf) = GST_BUFFER_TIMESTAMP (frame->output_buffer);
    GST_BUFFER_DURATION (buf) = 0;
    if (GST_VIDEO_INFO_FPS_D (info) == 0 || GST_VIDEO_INFO_FPS_N (info) == 0) {
      GST_BUFFER_OFFSET_END (buf) = GST_BUFFER_OFFSET_NONE;
      GST_BUFFER_OFFSET (buf) = GST_BUFFER_OFFSET_NONE;
    } else {
      GST_BUFFER_OFFSET_END (buf) =
          _to_granulepos (frame->presentation_frame_number + 1,
          inv_count, encoder->keyframe_distance);
      GST_BUFFER_OFFSET (buf) =
          gst_util_uint64_scale (frame->presentation_frame_number + 1,
          GST_SECOND * GST_VIDEO_INFO_FPS_D (info),
          GST_VIDEO_INFO_FPS_N (info));
    }

    ret = gst_pad_push (GST_VIDEO_ENCODER_SRC_PAD (video_encoder), buf);

    if (ret != GST_FLOW_OK) {
      GST_WARNING_OBJECT (encoder, "flow error %d", ret);
      goto done;
    }
  }

  buf = frame->output_buffer;

  /* FIXME : All of this should have already been handled by base classes, no ? */
  if (!user_data->invisible && GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
    GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
    encoder->keyframe_distance = 0;
  } else {
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
    encoder->keyframe_distance++;
  }

  if (GST_VIDEO_INFO_FPS_D (info) == 0 || GST_VIDEO_INFO_FPS_N (info) == 0) {
    GST_BUFFER_OFFSET_END (buf) = GST_BUFFER_OFFSET_NONE;
    GST_BUFFER_OFFSET (buf) = GST_BUFFER_OFFSET_NONE;
  } else {
    GST_BUFFER_OFFSET_END (buf) =
        _to_granulepos (frame->presentation_frame_number + 1, 0,
        encoder->keyframe_distance);
    GST_BUFFER_OFFSET (buf) =
        gst_util_uint64_scale (frame->presentation_frame_number + 1,
        GST_SECOND * GST_VIDEO_INFO_FPS_D (info), GST_VIDEO_INFO_FPS_N (info));
  }

  GST_LOG_OBJECT (video_encoder, "src ts: %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));

done:
  return ret;
}
Example #16
static GstCaps *
gst_mfxpostproc_transform_caps_impl (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps)
{
  GstMfxPostproc *const vpp = GST_MFXPOSTPROC (trans);
  GstVideoInfo vi, peer_vi;
  GstVideoFormat out_format;
  GstCaps *out_caps, *peer_caps;
  GstMfxCapsFeature feature;
  const gchar *feature_str;
  guint width, height;

  /* Generate the sink pad caps, that could be fixated afterwards */
  if (direction == GST_PAD_SRC) {
    if (!ensure_allowed_sinkpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_sinkpad_caps);
  }

  /* Generate complete set of src pad caps if non-fixated sink pad
   * caps are provided */
  if (!gst_caps_is_fixed (caps)) {
    if (!ensure_allowed_srcpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_srcpad_caps);
  }

  /* Generate the expected src pad caps, from the current fixated
   * sink pad caps */
  if (!gst_video_info_from_caps (&vi, caps))
    return NULL;

  if (vpp->deinterlace_mode)
    GST_VIDEO_INFO_INTERLACE_MODE (&vi) = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  /* Update size from user-specified parameters */
  find_best_size (vpp, &vi, &width, &height);

  /* Update format from user-specified parameters */
  peer_caps = gst_pad_peer_query_caps (GST_BASE_TRANSFORM_SRC_PAD (trans),
      vpp->allowed_srcpad_caps);

  if (gst_caps_is_any (peer_caps) || gst_caps_is_empty (peer_caps))
    return peer_caps;
  if (!gst_caps_is_fixed (peer_caps))
    peer_caps = gst_caps_fixate (peer_caps);

  gst_video_info_from_caps (&peer_vi, peer_caps);
  out_format = GST_VIDEO_INFO_FORMAT (&peer_vi);

  /* Update width and height from the caps */
  if (GST_VIDEO_INFO_HEIGHT (&peer_vi) != 1 &&
      GST_VIDEO_INFO_WIDTH (&peer_vi) != 1)
    find_best_size(vpp, &peer_vi, &width, &height);

  if (vpp->format != DEFAULT_FORMAT)
    out_format = vpp->format;

  if (vpp->fps_n) {
    GST_VIDEO_INFO_FPS_N (&vi) = vpp->fps_n;
    GST_VIDEO_INFO_FPS_D (&vi) = vpp->fps_d;
    vpp->field_duration = gst_util_uint64_scale (GST_SECOND,
        vpp->fps_d, vpp->fps_n);
    if (DEFAULT_FRC_ALG == vpp->alg)
      vpp->alg = GST_MFX_FRC_PRESERVE_TIMESTAMP;
  }

  if (peer_caps)
    gst_caps_unref (peer_caps);

  feature =
      gst_mfx_find_preferred_caps_feature (GST_BASE_TRANSFORM_SRC_PAD (trans),
        &out_format);
  gst_video_info_change_format (&vi, out_format, width, height);

  out_caps = gst_video_info_to_caps (&vi);
  if (!out_caps)
    return NULL;


  if (feature) {
    feature_str = gst_mfx_caps_feature_to_string (feature);
    if (feature_str)
      gst_caps_set_features (out_caps, 0,
          gst_caps_features_new (feature_str, NULL));
  }

  if (vpp->format != out_format)
    vpp->format = out_format;

  return out_caps;
}
Example #17
/* Return the possible output caps based on inputs and downstream prefs */
static GstCaps *
_update_caps (GstVideoAggregator * vagg, GstCaps * caps)
{
  GstGLStereoMix *mix = GST_GL_STEREO_MIX (vagg);
  GList *l;
  gint best_width = -1, best_height = -1;
  gdouble best_fps = -1, cur_fps;
  gint best_fps_n = 0, best_fps_d = 1;
  GstVideoInfo *mix_info;
  GstCaps *blend_caps, *tmp_caps;
  GstCaps *out_caps;

  GST_OBJECT_LOCK (vagg);

  for (l = GST_ELEMENT (vagg)->sinkpads; l; l = l->next) {
    GstVideoAggregatorPad *pad = l->data;
    GstVideoInfo tmp = pad->info;
    gint this_width, this_height;
    gint fps_n, fps_d;

    if (!pad->info.finfo)
      continue;

    /* This can happen if we release a pad and another pad hasn't negotiated caps yet */
    if (GST_VIDEO_INFO_FORMAT (&pad->info) == GST_VIDEO_FORMAT_UNKNOWN)
      continue;

    /* Convert to per-view width/height for unpacked forms */
    gst_video_multiview_video_info_change_mode (&tmp,
        GST_VIDEO_MULTIVIEW_MODE_SEPARATED, GST_VIDEO_MULTIVIEW_FLAGS_NONE);

    this_width = GST_VIDEO_INFO_WIDTH (&tmp);
    this_height = GST_VIDEO_INFO_HEIGHT (&tmp);
    fps_n = GST_VIDEO_INFO_FPS_N (&tmp);
    fps_d = GST_VIDEO_INFO_FPS_D (&tmp);

    GST_INFO_OBJECT (vagg, "Input pad %" GST_PTR_FORMAT
        " w %u h %u", pad, this_width, this_height);

    if (this_width == 0 || this_height == 0)
      continue;

    if (best_width < this_width)
      best_width = this_width;
    if (best_height < this_height)
      best_height = this_height;

    if (fps_d == 0)
      cur_fps = 0.0;
    else
      gst_util_fraction_to_double (fps_n, fps_d, &cur_fps);

    if (best_fps < cur_fps) {
      best_fps = cur_fps;
      best_fps_n = fps_n;
      best_fps_d = fps_d;
    }

    /* FIXME: Preserve PAR for at least one input when different sized inputs */
  }
  GST_OBJECT_UNLOCK (vagg);

  mix_info = &mix->mix_info;
  gst_video_info_set_format (mix_info, GST_VIDEO_FORMAT_RGBA, best_width,
      best_height);

  GST_VIDEO_INFO_FPS_N (mix_info) = best_fps_n;
  GST_VIDEO_INFO_FPS_D (mix_info) = best_fps_d;

  GST_VIDEO_INFO_MULTIVIEW_MODE (mix_info) = GST_VIDEO_MULTIVIEW_MODE_SEPARATED;
  GST_VIDEO_INFO_VIEWS (mix_info) = 2;

  /* FIXME: If input is marked as flipped or flopped, preserve those flags */
  GST_VIDEO_INFO_MULTIVIEW_FLAGS (mix_info) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;

  /* Choose our output format based on downstream preferences */
  blend_caps = gst_video_info_to_caps (mix_info);

  gst_caps_set_features (blend_caps, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

  tmp_caps = get_converted_caps (GST_GL_STEREO_MIX (vagg), blend_caps);
  gst_caps_unref (blend_caps);

  out_caps = gst_caps_intersect (caps, tmp_caps);
  gst_caps_unref (tmp_caps);

  GST_DEBUG_OBJECT (vagg, "Possible output caps %" GST_PTR_FORMAT, out_caps);

  return out_caps;
}
Example #18
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstPad *const srcpad = GST_VIDEO_DECODER_SRC_PAD (vdec);
  GstCaps *allowed;
  GstVideoCodecState *state, *ref_state;
  GstVaapiCapsFeature feature;
  GstCapsFeatures *features;
  GstCaps *allocation_caps;
  GstVideoInfo *vi;
  GstVideoFormat format;
  GstClockTime latency;
  gint fps_d, fps_n;
  guint width, height;
  const gchar *format_str, *feature_str;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  format = GST_VIDEO_INFO_FORMAT (&decode->decoded_info);
  allowed = gst_vaapidecode_get_allowed_srcpad_caps (decode);
  feature = gst_vaapi_find_preferred_caps_feature (srcpad, allowed, &format);
  gst_caps_unref (allowed);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

#if (!USE_GLX && !USE_EGL)
  /* This is a very pathological situation. Should not happen. */
  if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
    return FALSE;
#endif

  if ((feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY ||
          feature == GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      && format != GST_VIDEO_INFO_FORMAT (&decode->decoded_info)) {
    GST_FIXME_OBJECT (decode, "validate if driver can convert from %s to %s",
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT
            (&decode->decoded_info)), gst_video_format_to_string (format));
  }

  width = decode->display_width;
  height = decode->display_height;

  if (!width || !height) {
    width = GST_VIDEO_INFO_WIDTH (&ref_state->info);
    height = GST_VIDEO_INFO_HEIGHT (&ref_state->info);
  }

  state = gst_video_decoder_set_output_state (vdec, format, width, height,
      ref_state);
  if (!state)
    return FALSE;

  if (GST_VIDEO_INFO_WIDTH (&state->info) == 0
      || GST_VIDEO_INFO_HEIGHT (&state->info) == 0) {
    gst_video_codec_state_unref (state);
    return FALSE;
  }

  vi = &state->info;
  state->caps = gst_video_info_to_caps (vi);

  switch (feature) {
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:{
      GstStructure *structure = gst_caps_get_structure (state->caps, 0);

      /* Remove chroma-site and colorimetry from src caps,
       * which is unnecessary on downstream if using VASurface
       */
      gst_structure_remove_fields (structure, "chroma-site", "colorimetry",
          NULL);

      feature_str = gst_vaapi_caps_feature_to_string (feature);
      features = gst_caps_features_new (feature_str, NULL);
      gst_caps_set_features (state->caps, 0, features);
      break;
    }
    default:
      break;
  }

  /* Allocation query is different from pad's caps */
  allocation_caps = NULL;
  if (GST_VIDEO_INFO_WIDTH (&decode->decoded_info) != width
      || GST_VIDEO_INFO_HEIGHT (&decode->decoded_info) != height) {
    allocation_caps = gst_caps_copy (state->caps);
    format_str = gst_video_format_to_string (format);
    gst_caps_set_simple (allocation_caps,
        "width", G_TYPE_INT, GST_VIDEO_INFO_WIDTH (&decode->decoded_info),
        "height", G_TYPE_INT, GST_VIDEO_INFO_HEIGHT (&decode->decoded_info),
        "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (decode, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
  }
  gst_caps_replace (&state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  fps_n = GST_VIDEO_INFO_FPS_N (vi);
  fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
Example #19
static GstFlowReturn
gst_gl_test_src_fill (GstPushSrc * psrc, GstBuffer * buffer)
{
  GstGLTestSrc *src = GST_GL_TEST_SRC (psrc);
  GstClockTime next_time;
  GstVideoFrame out_frame;
  GstGLSyncMeta *sync_meta;

  if (G_UNLIKELY (!src->negotiated || !src->context))
    goto not_negotiated;

  /* 0 framerate and we are at the second frame, eos */
  if (G_UNLIKELY (GST_VIDEO_INFO_FPS_N (&src->out_info) == 0
          && src->n_frames == 1))
    goto eos;

  if (!gst_video_frame_map (&out_frame, &src->out_info, buffer,
          GST_MAP_WRITE | GST_MAP_GL)) {
    return GST_FLOW_NOT_NEGOTIATED;
  }

  src->out_tex = (GstGLMemory *) out_frame.map[0].memory;

  gst_gl_context_thread_add (src->context, (GstGLContextThreadFunc) _fill_gl,
      src);
  gst_video_frame_unmap (&out_frame);
  if (!src->gl_result)
    goto gl_error;

  sync_meta = gst_buffer_get_gl_sync_meta (buffer);
  if (sync_meta)
    gst_gl_sync_meta_set_sync_point (sync_meta, src->context);

  GST_BUFFER_TIMESTAMP (buffer) = src->timestamp_offset + src->running_time;
  GST_BUFFER_OFFSET (buffer) = src->n_frames;
  src->n_frames++;
  GST_BUFFER_OFFSET_END (buffer) = src->n_frames;
  if (src->out_info.fps_n) {
    next_time = gst_util_uint64_scale_int (src->n_frames * GST_SECOND,
        src->out_info.fps_d, src->out_info.fps_n);
    GST_BUFFER_DURATION (buffer) = next_time - src->running_time;
  } else {
    next_time = src->timestamp_offset;
    /* NONE means forever */
    GST_BUFFER_DURATION (buffer) = GST_CLOCK_TIME_NONE;
  }

  src->running_time = next_time;

  return GST_FLOW_OK;

gl_error:
  {
    GST_ELEMENT_ERROR (src, RESOURCE, NOT_FOUND, (_("failed to draw pattern")),
        (_("A GL error occurred")));
    return GST_FLOW_NOT_NEGOTIATED;
  }
not_negotiated:
  {
    GST_ELEMENT_ERROR (src, CORE, NEGOTIATION, (NULL),
        (_("format wasn't negotiated before get function")));
    return GST_FLOW_NOT_NEGOTIATED;
  }
eos:
  {
    GST_DEBUG_OBJECT (src, "eos: 0 framerate, frame %d", (gint) src->n_frames);
    return GST_FLOW_EOS;
  }
}
Example #20
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstVideoCodecState *state, *ref_state;
  GstVideoInfo *vi;
  GstVideoFormat format = GST_VIDEO_FORMAT_I420;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  GstCapsFeatures *features = NULL;
  GstVaapiCapsFeature feature;

  feature =
      gst_vaapi_find_preferred_caps_feature (GST_VIDEO_DECODER_SRC_PAD (vdec),
      GST_VIDEO_INFO_FORMAT (&ref_state->info), &format);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

  switch (feature) {
#if (USE_GLX || USE_EGL)
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
      features =
          gst_caps_features_new
          (GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL);
      break;
#endif
#if GST_CHECK_VERSION(1,3,1)
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:
      features =
          gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE, NULL);
      break;
#endif
    default:
      break;
  }

  state = gst_video_decoder_set_output_state (vdec, format,
      ref_state->info.width, ref_state->info.height, ref_state);
  if (!state || state->info.width == 0 || state->info.height == 0)
    return FALSE;

  vi = &state->info;

  state->caps = gst_video_info_to_caps (vi);
  if (features)
    gst_caps_set_features (state->caps, 0, features);
  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  gint fps_n = GST_VIDEO_INFO_FPS_N (vi);
  gint fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
Example #21
static gboolean
gst_openh264enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenh264Enc *openh264enc = GST_OPENH264ENC (encoder);
  gchar *debug_caps;
  guint width, height, fps_n, fps_d;
  SEncParamExt enc_params;
  SliceModeEnum slice_mode = SM_SINGLE_SLICE;
  guint n_slices = 1;
  gint ret;
  GstCaps *outcaps;
  GstVideoCodecState *output_state;
  openh264enc->frame_count = 0;
  int video_format = videoFormatI420;

  debug_caps = gst_caps_to_string (state->caps);
  GST_DEBUG_OBJECT (openh264enc, "gst_e26d4_enc_set_format called, caps: %s",
      debug_caps);
  g_free (debug_caps);

  gst_openh264enc_stop (encoder);

  if (openh264enc->input_state) {
    gst_video_codec_state_unref (openh264enc->input_state);
  }
  openh264enc->input_state = gst_video_codec_state_ref (state);

  width = GST_VIDEO_INFO_WIDTH (&state->info);
  height = GST_VIDEO_INFO_HEIGHT (&state->info);
  fps_n = GST_VIDEO_INFO_FPS_N (&state->info);
  fps_d = GST_VIDEO_INFO_FPS_D (&state->info);

  if (openh264enc->encoder != NULL) {
    openh264enc->encoder->Uninitialize ();
    WelsDestroySVCEncoder (openh264enc->encoder);
    openh264enc->encoder = NULL;
  }
  WelsCreateSVCEncoder (&openh264enc->encoder);
  unsigned int uiTraceLevel = WELS_LOG_ERROR;
  openh264enc->encoder->SetOption (ENCODER_OPTION_TRACE_LEVEL, &uiTraceLevel);

  openh264enc->encoder->GetDefaultParams (&enc_params);

  enc_params.iUsageType = openh264enc->usage_type;
  enc_params.iPicWidth = width;
  enc_params.iPicHeight = height;
  enc_params.iTargetBitrate = openh264enc->bitrate;
  enc_params.iMaxBitrate = openh264enc->max_bitrate;
  enc_params.iRCMode = openh264enc->rate_control;
  enc_params.iTemporalLayerNum = 1;
  enc_params.iSpatialLayerNum = 1;
  enc_params.iLtrMarkPeriod = 30;
  enc_params.iMultipleThreadIdc = openh264enc->multi_thread;
  enc_params.bEnableDenoise = openh264enc->enable_denoise;
  enc_params.iComplexityMode = openh264enc->complexity;
  enc_params.uiIntraPeriod = openh264enc->gop_size;
  enc_params.bEnableBackgroundDetection = openh264enc->background_detection;
  enc_params.bEnableAdaptiveQuant = openh264enc->adaptive_quantization;
  enc_params.bEnableSceneChangeDetect = openh264enc->scene_change_detection;
  enc_params.bEnableFrameSkip = openh264enc->enable_frame_skip;
  enc_params.bEnableLongTermReference = 0;
#if OPENH264_MINOR >= 4
  enc_params.eSpsPpsIdStrategy = CONSTANT_ID;
#else
  enc_params.bEnableSpsPpsIdAddition = 0;
#endif
  enc_params.bPrefixNalAddingCtrl = 0;
  enc_params.fMaxFrameRate = fps_n * 1.0 / fps_d;
  enc_params.iLoopFilterDisableIdc = openh264enc->deblocking_mode;
  enc_params.sSpatialLayers[0].uiProfileIdc = PRO_BASELINE;
  enc_params.sSpatialLayers[0].iVideoWidth = enc_params.iPicWidth;
  enc_params.sSpatialLayers[0].iVideoHeight = enc_params.iPicHeight;
  enc_params.sSpatialLayers[0].fFrameRate = fps_n * 1.0 / fps_d;
  enc_params.sSpatialLayers[0].iSpatialBitrate = enc_params.iTargetBitrate;
  enc_params.sSpatialLayers[0].iMaxSpatialBitrate = enc_params.iMaxBitrate;

  if (openh264enc->slice_mode == GST_OPENH264_SLICE_MODE_N_SLICES) {
    if (openh264enc->num_slices == 1)
      slice_mode = SM_SINGLE_SLICE;
    else
      slice_mode = SM_FIXEDSLCNUM_SLICE;
    n_slices = openh264enc->num_slices;
  } else if (openh264enc->slice_mode == GST_OPENH264_SLICE_MODE_AUTO) {
#if OPENH264_MAJOR == 1 && OPENH264_MINOR < 6
    slice_mode = SM_AUTO_SLICE;
#else
    slice_mode = SM_FIXEDSLCNUM_SLICE;
    n_slices = 0;
#endif
  } else {
    GST_ERROR_OBJECT (openh264enc, "unexpected slice mode %d",
        openh264enc->slice_mode);
    slice_mode = SM_SINGLE_SLICE;
  }

#if OPENH264_MAJOR == 1 && OPENH264_MINOR < 6
  enc_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = slice_mode;
  enc_params.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = n_slices;
#else
  enc_params.sSpatialLayers[0].sSliceArgument.uiSliceMode = slice_mode;
  enc_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = n_slices;
#endif

  openh264enc->framerate = (1 + fps_n / fps_d);

  ret = openh264enc->encoder->InitializeExt (&enc_params);

  if (ret != cmResultSuccess) {
    GST_ERROR_OBJECT (openh264enc, "failed to initialize encoder");
    return FALSE;
  }

  openh264enc->encoder->SetOption (ENCODER_OPTION_DATAFORMAT, &video_format);

  outcaps =
      gst_caps_copy (gst_static_pad_template_get_caps
      (&gst_openh264enc_src_template));

  output_state = gst_video_encoder_set_output_state (encoder, outcaps, state);
  gst_video_codec_state_unref (output_state);

  return gst_video_encoder_negotiate (encoder);
}
Example #22
static void
gst_raw_video_parse_set_property (GObject * object, guint prop_id,
    GValue const *value, GParamSpec * pspec)
{
  GstBaseParse *base_parse = GST_BASE_PARSE (object);
  GstRawBaseParse *raw_base_parse = GST_RAW_BASE_PARSE (object);
  GstRawVideoParse *raw_video_parse = GST_RAW_VIDEO_PARSE (object);
  GstRawVideoParseConfig *props_cfg = &(raw_video_parse->properties_config);

  /* All properties are handled similarly:
   * - if the new value is the same as the current value, nothing is done
   * - the parser lock is held while the new value is set
   * - if the properties config is the current config, the source caps are
   *   invalidated to ensure that the code in handle_frame pushes a new CAPS
   *   event out
   * - properties that affect the video frame size call the function to update
   *   the info and also call gst_base_parse_set_min_frame_size() to ensure
   *   that the minimum frame size can hold one complete video frame; to
   *   ensure that the min frame size includes any extra padding,
   *   it is set to the result of gst_raw_video_parse_get_config_frame_size()
   * - property configuration values that require video info updates aren't
   *   written directly into the video info structure, but in the extra
   *   fields instead (gst_raw_video_parse_update_info() then copies the values
   *   from these fields into the video info); see the documentation inside
   *   gst_raw_video_parse_update_info() for the reason why
   */

  switch (prop_id) {
    case PROP_WIDTH:
    {
      gint new_width = g_value_get_int (value);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      if (new_width != props_cfg->width) {
        props_cfg->width = new_width;
        gst_raw_video_parse_update_info (props_cfg);

        if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse)) {
          gst_raw_base_parse_invalidate_src_caps (raw_base_parse);
          gst_base_parse_set_min_frame_size (base_parse,
              gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                  GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
        }
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_HEIGHT:
    {
      gint new_height = g_value_get_int (value);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      if (new_height != props_cfg->height) {
        props_cfg->height = new_height;
        gst_raw_video_parse_update_info (props_cfg);

        if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse)) {
          gst_raw_base_parse_invalidate_src_caps (raw_base_parse);
          gst_base_parse_set_min_frame_size (base_parse,
              gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                  GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
        }
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_FORMAT:
    {
      GstVideoFormat new_format = g_value_get_enum (value);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      if (new_format != props_cfg->format) {
        props_cfg->format = new_format;
        gst_raw_video_parse_update_info (props_cfg);

        if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse)) {
          gst_raw_base_parse_invalidate_src_caps (raw_base_parse);
          gst_base_parse_set_min_frame_size (base_parse,
              gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                  GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
        }
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_PIXEL_ASPECT_RATIO:
    {
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      /* The pixel aspect ratio does not affect the video frame size,
       * so it is just set directly without any updates */
      props_cfg->pixel_aspect_ratio_n =
          GST_VIDEO_INFO_PAR_N (&(props_cfg->info)) =
          gst_value_get_fraction_numerator (value);
      props_cfg->pixel_aspect_ratio_d =
          GST_VIDEO_INFO_PAR_D (&(props_cfg->info)) =
          gst_value_get_fraction_denominator (value);
      GST_DEBUG_OBJECT (raw_video_parse, "setting pixel aspect ratio to %u/%u",
          props_cfg->pixel_aspect_ratio_n, props_cfg->pixel_aspect_ratio_d);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_FRAMERATE:
    {
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      /* The framerate does not affect the video frame size,
       * so it is just set directly without any updates */
      props_cfg->framerate_n = GST_VIDEO_INFO_FPS_N (&(props_cfg->info)) =
          gst_value_get_fraction_numerator (value);
      props_cfg->framerate_d = GST_VIDEO_INFO_FPS_D (&(props_cfg->info)) =
          gst_value_get_fraction_denominator (value);
      GST_DEBUG_OBJECT (raw_video_parse, "setting framerate to %u/%u",
          props_cfg->framerate_n, props_cfg->framerate_d);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_INTERLACED:
    {
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      /* Interlacing does not affect the video frame size,
       * so it is just set directly without any updates */
      props_cfg->interlaced = g_value_get_boolean (value);
      GST_VIDEO_INFO_INTERLACE_MODE (&(props_cfg->info)) =
          props_cfg->interlaced ? GST_VIDEO_INTERLACE_MODE_INTERLEAVED :
          GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);

      break;
    }

    case PROP_TOP_FIELD_FIRST:
    {
      /* The top-field-first flag is a detail related to
       * interlacing, so no video info update is needed */

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);
      props_cfg->top_field_first = g_value_get_boolean (value);
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_PLANE_STRIDES:
    {
      GValueArray *valarray = g_value_get_boxed (value);
      guint n_planes;
      guint i;

      /* If no valarray is given, then disable custom
       * plane strides & offsets and stick to the
       * standard computed ones */
      if (valarray == NULL) {
        GST_DEBUG_OBJECT (raw_video_parse,
            "custom plane strides & offsets disabled");
        props_cfg->custom_plane_strides = FALSE;
        gst_raw_video_parse_update_info (props_cfg);
        break;
      }

      /* Sanity check - reject empty arrays */
      if ((valarray != NULL) && (valarray->n_values == 0)) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("plane strides property holds an empty array"), (NULL));
        break;
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      n_planes = GST_VIDEO_INFO_N_PLANES (&(props_cfg->info));

      /* Check that the valarray holds the right number of values */
      if (valarray->n_values != n_planes) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("incorrect number of elements in plane strides property"),
            ("expected: %u, got: %u", n_planes, valarray->n_values));
        GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
        break;
      }

      /* Copy the values to the stride array */
      for (i = 0; i < n_planes; ++i) {
        GValue *val = g_value_array_get_nth (valarray, i);
        props_cfg->plane_strides[i] = g_value_get_uint (val);
        GST_DEBUG_OBJECT (raw_video_parse, "plane #%u stride: %d", i,
            props_cfg->plane_strides[i]);
      }

      props_cfg->custom_plane_strides = TRUE;

      gst_raw_video_parse_update_info (props_cfg);

      if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse))
        gst_base_parse_set_min_frame_size (base_parse,
            gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_PLANE_OFFSETS:
    {
      GValueArray *valarray = g_value_get_boxed (value);
      guint n_planes;
      guint i;

      /* If no valarray is given, then disable custom
       * plane strides & offsets and stick to the
       * standard computed ones */
      if (valarray == NULL) {
        GST_DEBUG_OBJECT (raw_video_parse,
            "custom plane strides & offsets disabled");
        props_cfg->custom_plane_strides = FALSE;
        gst_raw_video_parse_update_info (props_cfg);
        break;
      }

      /* Sanity check - reject empty arrays */
      if ((valarray != NULL) && (valarray->n_values == 0)) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("plane offsets property holds an empty array"), (NULL));
        break;
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      n_planes = GST_VIDEO_INFO_N_PLANES (&(props_cfg->info));

      /* Check that the valarray holds the right number of values */
      if (valarray->n_values != n_planes) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("incorrect number of elements in plane offsets property"),
            ("expected: %u, got: %u", n_planes, valarray->n_values));
        GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
        break;
      }

      /* Copy the values to the offset array */
      for (i = 0; i < n_planes; ++i) {
        GValue *val = g_value_array_get_nth (valarray, i);
        props_cfg->plane_offsets[i] = g_value_get_uint (val);
        GST_DEBUG_OBJECT (raw_video_parse, "plane #%u offset: %" G_GSIZE_FORMAT,
            i, props_cfg->plane_offsets[i]);
      }

      props_cfg->custom_plane_strides = TRUE;

      gst_raw_video_parse_update_info (props_cfg);

      if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse))
        gst_base_parse_set_min_frame_size (base_parse,
            gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_FRAME_STRIDE:
    {
      /* The frame stride does not affect the video frame size,
       * so it is just set directly without any updates */

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);
      props_cfg->frame_stride = g_value_get_uint (value);
      gst_raw_video_parse_update_info (props_cfg);
      if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse))
        gst_base_parse_set_min_frame_size (base_parse,
            gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);

      break;
    }

    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
Example #23
static gboolean
gst_raw_video_parse_set_config_from_caps (GstRawBaseParse * raw_base_parse,
    GstRawBaseParseConfig config, GstCaps * caps)
{
  int i;
  GstStructure *structure;
  GstRawVideoParse *raw_video_parse = GST_RAW_VIDEO_PARSE (raw_base_parse);
  GstRawVideoParseConfig *config_ptr =
      gst_raw_video_parse_get_config_ptr (raw_video_parse, config);

  g_assert (caps != NULL);

  /* Caps might get copied, and the copy needs to be unref'd.
   * Also, the caller retains ownership over the original caps.
   * So, to make this mechanism also work with cases where the
   * caps are *not* copied, ref the original caps here first. */
  gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  /* For unaligned raw data, the output caps stay the same,
   * except that video/x-unaligned-raw becomes video/x-raw,
   * since the parser aligns the frame data */
  if (gst_structure_has_name (structure, "video/x-unaligned-raw")) {
    /* Copy the caps to be able to modify them */
    GstCaps *new_caps = gst_caps_copy (caps);
    gst_caps_unref (caps);
    caps = new_caps;

    /* Change the media type to video/x-raw , otherwise
     * gst_video_info_from_caps() won't work */
    structure = gst_caps_get_structure (caps, 0);
    gst_structure_set_name (structure, "video/x-raw");
  }

  config_ptr->ready = gst_video_info_from_caps (&(config_ptr->info), caps);

  if (config_ptr->ready) {
    config_ptr->width = GST_VIDEO_INFO_WIDTH (&(config_ptr->info));
    config_ptr->height = GST_VIDEO_INFO_HEIGHT (&(config_ptr->info));
    config_ptr->pixel_aspect_ratio_n =
        GST_VIDEO_INFO_PAR_N (&(config_ptr->info));
    config_ptr->pixel_aspect_ratio_d =
        GST_VIDEO_INFO_PAR_D (&(config_ptr->info));
    config_ptr->framerate_n = GST_VIDEO_INFO_FPS_N (&(config_ptr->info));
    config_ptr->framerate_d = GST_VIDEO_INFO_FPS_D (&(config_ptr->info));
    config_ptr->interlaced = GST_VIDEO_INFO_IS_INTERLACED (&(config_ptr->info));
    config_ptr->top_field_first = 0;
    config_ptr->frame_stride = 0;

    for (i = 0; i < GST_VIDEO_MAX_PLANES; ++i) {
      config_ptr->plane_offsets[i] =
          GST_VIDEO_INFO_PLANE_OFFSET (&(config_ptr->info), i);
      config_ptr->plane_strides[i] =
          GST_VIDEO_INFO_PLANE_STRIDE (&(config_ptr->info), i);
    }
  }

  gst_caps_unref (caps);

  return config_ptr->ready;
}
Example #24
static void
gst_raw_video_parse_update_info (GstRawVideoParseConfig * config)
{
  guint i;
  guint n_planes;
  guint last_plane;
  gsize last_plane_offset, last_plane_size;
  GstVideoInfo *info = &(config->info);

  GST_DEBUG ("updating info with width %u height %u format %s "
      "custom plane strides&offsets %d", config->width, config->height,
      gst_video_format_to_string (config->format),
      config->custom_plane_strides);

  gst_video_info_set_format (info, config->format, config->width,
      config->height);

  GST_VIDEO_INFO_PAR_N (info) = config->pixel_aspect_ratio_n;
  GST_VIDEO_INFO_PAR_D (info) = config->pixel_aspect_ratio_d;
  GST_VIDEO_INFO_FPS_N (info) = config->framerate_n;
  GST_VIDEO_INFO_FPS_D (info) = config->framerate_d;
  GST_VIDEO_INFO_INTERLACE_MODE (info) =
      config->interlaced ? GST_VIDEO_INTERLACE_MODE_INTERLEAVED :
      GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  /* Check if there are custom plane strides & offsets that need to be preserved */
  if (config->custom_plane_strides) {
    /* In case there are, overwrite the offsets&strides computed by
     * gst_video_info_set_format with the custom ones */
    for (i = 0; i < GST_VIDEO_MAX_PLANES; ++i) {
      GST_VIDEO_INFO_PLANE_OFFSET (info, i) = config->plane_offsets[i];
      GST_VIDEO_INFO_PLANE_STRIDE (info, i) = config->plane_strides[i];
    }
  } else {
    /* No custom planes&offsets; copy the computed ones into
     * the plane_offsets & plane_strides arrays to ensure they
     * are equal to the ones in the videoinfo */
    for (i = 0; i < GST_VIDEO_MAX_PLANES; ++i) {
      config->plane_offsets[i] = GST_VIDEO_INFO_PLANE_OFFSET (info, i);
      config->plane_strides[i] = GST_VIDEO_INFO_PLANE_STRIDE (info, i);
    }
  }

  n_planes = GST_VIDEO_INFO_N_PLANES (info);
  if (n_planes < 1)
    n_planes = 1;

  /* Figure out what plane is the physically last one. Typically
   * this is the last plane in the list (= at index n_planes-1).
   * However, this is not guaranteed, so we have to scan the offsets
   * to find the last plane. */
  last_plane_offset = 0;
  last_plane = 0;
  for (i = 0; i < n_planes; ++i) {
    gsize plane_offset = GST_VIDEO_INFO_PLANE_OFFSET (info, i);
    if (plane_offset >= last_plane_offset) {
      last_plane = i;
      last_plane_offset = plane_offset;
    }
  }

  last_plane_size =
      GST_VIDEO_INFO_PLANE_STRIDE (info,
      last_plane) * GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (info->finfo,
      last_plane, config->height);

  GST_VIDEO_INFO_SIZE (info) = last_plane_offset + last_plane_size;

  GST_DEBUG ("last plane #%u:  offset: %" G_GSIZE_FORMAT " size: %"
      G_GSIZE_FORMAT " => frame size minus extra padding: %" G_GSIZE_FORMAT,
      last_plane, last_plane_offset, last_plane_size,
      GST_VIDEO_INFO_SIZE (info));
}
Example #25
static gboolean
gst_rtp_vraw_depay_setcaps (GstRTPBaseDepayload * depayload, GstCaps * caps)
{
  GstStructure *structure;
  GstRtpVRawDepay *rtpvrawdepay;
  gint clock_rate;
  const gchar *str;
  gint format, width, height, depth, pgroup, xinc, yinc;
  GstCaps *srccaps;
  gboolean res;
  GstFlowReturn ret;

  rtpvrawdepay = GST_RTP_VRAW_DEPAY (depayload);

  structure = gst_caps_get_structure (caps, 0);

  xinc = yinc = 1;

  if (!gst_structure_get_int (structure, "clock-rate", &clock_rate))
    clock_rate = 90000;         /* default */
  depayload->clock_rate = clock_rate;

  if (!(str = gst_structure_get_string (structure, "width")))
    goto no_width;
  width = atoi (str);

  if (!(str = gst_structure_get_string (structure, "height")))
    goto no_height;
  height = atoi (str);

  if (!(str = gst_structure_get_string (structure, "depth")))
    goto no_depth;
  depth = atoi (str);

  /* optional interlace value but we don't handle interlaced
   * formats yet */
  if (gst_structure_get_string (structure, "interlace"))
    goto interlaced;

  if (!(str = gst_structure_get_string (structure, "sampling")))
    goto no_sampling;

  if (!strcmp (str, "RGB")) {
    format = GST_VIDEO_FORMAT_RGB;
    pgroup = 3;
  } else if (!strcmp (str, "RGBA")) {
    format = GST_VIDEO_FORMAT_RGBA;
    pgroup = 4;
  } else if (!strcmp (str, "BGR")) {
    format = GST_VIDEO_FORMAT_BGR;
    pgroup = 3;
  } else if (!strcmp (str, "BGRA")) {
    format = GST_VIDEO_FORMAT_BGRA;
    pgroup = 4;
  } else if (!strcmp (str, "YCbCr-4:4:4")) {
    format = GST_VIDEO_FORMAT_AYUV;
    pgroup = 3;
  } else if (!strcmp (str, "YCbCr-4:2:2")) {
    if (depth == 8) {
      format = GST_VIDEO_FORMAT_UYVY;
      pgroup = 4;
    } else if (depth == 10) {
      format = GST_VIDEO_FORMAT_UYVP;
      pgroup = 5;
    } else
      goto unknown_format;
    xinc = 2;
  } else if (!strcmp (str, "YCbCr-4:2:0")) {
    format = GST_VIDEO_FORMAT_I420;
    pgroup = 6;
    xinc = yinc = 2;
  } else if (!strcmp (str, "YCbCr-4:1:1")) {
    format = GST_VIDEO_FORMAT_Y41B;
    pgroup = 6;
    xinc = 4;
  } else {
    goto unknown_format;
  }

  gst_video_info_init (&rtpvrawdepay->vinfo);
  gst_video_info_set_format (&rtpvrawdepay->vinfo, format, width, height);
  GST_VIDEO_INFO_FPS_N (&rtpvrawdepay->vinfo) = 0;
  GST_VIDEO_INFO_FPS_D (&rtpvrawdepay->vinfo) = 1;

  rtpvrawdepay->pgroup = pgroup;
  rtpvrawdepay->xinc = xinc;
  rtpvrawdepay->yinc = yinc;

  srccaps = gst_video_info_to_caps (&rtpvrawdepay->vinfo);
  res = gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload), srccaps);

  GST_DEBUG_OBJECT (depayload, "width %d, height %d, format %d", width, height,
      format);
  GST_DEBUG_OBJECT (depayload, "xinc %d, yinc %d, pgroup %d",
      xinc, yinc, pgroup);

  /* negotiate a bufferpool; keep our caps ref alive until that is done */
  ret = gst_rtp_vraw_depay_negotiate_pool (rtpvrawdepay, srccaps,
      &rtpvrawdepay->vinfo);
  gst_caps_unref (srccaps);
  if (ret != GST_FLOW_OK)
    goto no_bufferpool;

  return res;

  /* ERRORS */
no_width:
  {
    GST_ERROR_OBJECT (depayload, "no width specified");
    return FALSE;
  }
no_height:
  {
    GST_ERROR_OBJECT (depayload, "no height specified");
    return FALSE;
  }
no_depth:
  {
    GST_ERROR_OBJECT (depayload, "no depth specified");
    return FALSE;
  }
interlaced:
  {
    GST_ERROR_OBJECT (depayload, "interlaced formats not supported yet");
    return FALSE;
  }
no_sampling:
  {
    GST_ERROR_OBJECT (depayload, "no sampling specified");
    return FALSE;
  }
unknown_format:
  {
    GST_ERROR_OBJECT (depayload, "unknown sampling format '%s'", str);
    return FALSE;
  }
no_bufferpool:
  {
    GST_DEBUG_OBJECT (depayload, "no bufferpool");
    return FALSE;
  }
}
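Note: the sampling strings matched above come from RFC 4175, where width, height and depth travel in SDP as strings (hence the atoi() calls). A hedged sketch of input caps that would take the 8-bit YCbCr-4:2:2 (UYVY) branch; the exact field values are illustrative, not mandated by the code:

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstCaps *caps;
  gchar *s;

  gst_init (&argc, &argv);

  /* width/height/depth are deliberately strings, matching the parser above */
  caps = gst_caps_new_simple ("application/x-rtp",
      "media", G_TYPE_STRING, "video",
      "encoding-name", G_TYPE_STRING, "RAW",
      "clock-rate", G_TYPE_INT, 90000,
      "sampling", G_TYPE_STRING, "YCbCr-4:2:2",
      "depth", G_TYPE_STRING, "8",
      "width", G_TYPE_STRING, "1280",
      "height", G_TYPE_STRING, "720", NULL);

  s = gst_caps_to_string (caps);
  g_print ("%s\n", s);
  g_free (s);
  gst_caps_unref (caps);
  return 0;
}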
Example #26
static void
gst_vp8_enc_set_stream_info (GstVPXEnc * enc, GstCaps * caps,
    GstVideoInfo * info)
{
  GstStructure *s;
  GstVideoEncoder *video_encoder;
  GstBuffer *stream_hdr, *vorbiscomment;
  const GstTagList *iface_tags;
  GValue array = { 0, };
  GValue value = { 0, };
  guint8 *data = NULL;
  GstMapInfo map;

  video_encoder = GST_VIDEO_ENCODER (enc);
  s = gst_caps_get_structure (caps, 0);

  /* put buffers in a fixed list */
  g_value_init (&array, GST_TYPE_ARRAY);
  g_value_init (&value, GST_TYPE_BUFFER);

  /* Create Ogg stream-info */
  stream_hdr = gst_buffer_new_and_alloc (26);
  gst_buffer_map (stream_hdr, &map, GST_MAP_WRITE);
  data = map.data;

  GST_WRITE_UINT8 (data, 0x4F);
  GST_WRITE_UINT32_BE (data + 1, 0x56503830);   /* "VP80" */
  GST_WRITE_UINT8 (data + 5, 0x01);     /* stream info header */
  GST_WRITE_UINT8 (data + 6, 1);        /* Major version 1 */
  GST_WRITE_UINT8 (data + 7, 0);        /* Minor version 0 */
  GST_WRITE_UINT16_BE (data + 8, GST_VIDEO_INFO_WIDTH (info));
  GST_WRITE_UINT16_BE (data + 10, GST_VIDEO_INFO_HEIGHT (info));
  GST_WRITE_UINT24_BE (data + 12, GST_VIDEO_INFO_PAR_N (info));
  GST_WRITE_UINT24_BE (data + 15, GST_VIDEO_INFO_PAR_D (info));
  GST_WRITE_UINT32_BE (data + 18, GST_VIDEO_INFO_FPS_N (info));
  GST_WRITE_UINT32_BE (data + 22, GST_VIDEO_INFO_FPS_D (info));

  gst_buffer_unmap (stream_hdr, &map);

  GST_BUFFER_FLAG_SET (stream_hdr, GST_BUFFER_FLAG_HEADER);
  gst_value_set_buffer (&value, stream_hdr);
  gst_value_array_append_value (&array, &value);
  g_value_unset (&value);
  gst_buffer_unref (stream_hdr);

  iface_tags = gst_tag_setter_get_tag_list (GST_TAG_SETTER (video_encoder));
  if (iface_tags) {
    vorbiscomment =
        gst_tag_list_to_vorbiscomment_buffer (iface_tags,
        (const guint8 *) "OVP80\2 ", 7,
        "Encoded with GStreamer vp8enc " PACKAGE_VERSION);

    GST_BUFFER_FLAG_SET (vorbiscomment, GST_BUFFER_FLAG_HEADER);

    g_value_init (&value, GST_TYPE_BUFFER);
    gst_value_set_buffer (&value, vorbiscomment);
    gst_value_array_append_value (&array, &value);
    g_value_unset (&value);
    gst_buffer_unref (vorbiscomment);
  }

  gst_structure_set_value (s, "streamheader", &array);
  g_value_unset (&array);

}
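Note: the 26-byte buffer written above is the Ogg VP8 stream-info header: 'O' plus the FourCC "VP80", a type byte (0x01 = stream info), version 1.0, then big-endian width, height, pixel-aspect-ratio and framerate. A read-side sketch mirroring those offsets (the parser function itself is hypothetical, not part of the element):

#include <gst/gst.h>

/* Hypothetical parser for the 26-byte "OVP80" stream-info header
 * produced above; each offset mirrors one GST_WRITE_* call. */
static gboolean
parse_vp8_stream_info (const guint8 * data, gsize size)
{
  if (size < 26 || data[0] != 0x4F /* 'O' */ ||
      GST_READ_UINT32_BE (data + 1) != 0x56503830 /* "VP80" */ ||
      data[5] != 0x01 /* stream info header */)
    return FALSE;

  g_print ("VP8 stream info v%u.%u: %ux%u, PAR %u/%u, fps %u/%u\n",
      data[6], data[7],
      GST_READ_UINT16_BE (data + 8), GST_READ_UINT16_BE (data + 10),
      GST_READ_UINT24_BE (data + 12), GST_READ_UINT24_BE (data + 15),
      GST_READ_UINT32_BE (data + 18), GST_READ_UINT32_BE (data + 22));
  return TRUE;
}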
Example #27
static gboolean
gst_msdkvpp_set_caps (GstBaseTransform * trans, GstCaps * caps,
    GstCaps * out_caps)
{
  GstMsdkVPP *thiz = GST_MSDKVPP (trans);
  GstVideoInfo in_info, out_info;
  gboolean sinkpad_info_changed = FALSE;
  gboolean srcpad_info_changed = FALSE;
  gboolean deinterlace;

  /* compare features by content, not by pointer identity */
  if (!gst_caps_features_is_equal (gst_caps_get_features (caps, 0),
          gst_caps_get_features (out_caps, 0)))
    thiz->need_vpp = 1;

  gst_video_info_from_caps (&in_info, caps);
  gst_video_info_from_caps (&out_info, out_caps);

  if (!gst_video_info_is_equal (&in_info, &thiz->sinkpad_info))
    sinkpad_info_changed = TRUE;
  if (!gst_video_info_is_equal (&out_info, &thiz->srcpad_info))
    srcpad_info_changed = TRUE;

  thiz->sinkpad_info = in_info;
  thiz->srcpad_info = out_info;
#ifndef _WIN32
  thiz->use_video_memory = TRUE;
#else
  thiz->use_video_memory = FALSE;
#endif

  if (!sinkpad_info_changed && !srcpad_info_changed && thiz->initialized)
    return TRUE;

  /* check for deinterlace requirement */
  deinterlace = gst_msdkvpp_is_deinterlace_enabled (thiz, &in_info);
  if (deinterlace)
    thiz->flags |= GST_MSDK_FLAG_DEINTERLACE;

  thiz->buffer_duration = GST_VIDEO_INFO_FPS_N (&out_info) > 0 ?
      gst_util_uint64_scale (GST_SECOND, GST_VIDEO_INFO_FPS_D (&out_info),
      GST_VIDEO_INFO_FPS_N (&out_info)) : 0;

  if (!gst_msdkvpp_initialize (thiz))
    return FALSE;

  /* set passthrough according to filter operation change */
  gst_msdkvpp_set_passthrough (thiz);

  /* Ensure sinkpad buffer pool */
  thiz->sinkpad_buffer_pool =
      gst_msdkvpp_create_buffer_pool (thiz, GST_PAD_SINK, caps,
      thiz->in_num_surfaces);
  if (!thiz->sinkpad_buffer_pool) {
    GST_ERROR_OBJECT (thiz, "Failed to ensure the sinkpad buffer pool");
    return FALSE;
  }
  /* Ensure a srcpad buffer pool */
  thiz->srcpad_buffer_pool =
      gst_msdkvpp_create_buffer_pool (thiz, GST_PAD_SRC, out_caps,
      thiz->out_num_surfaces);
  if (!thiz->srcpad_buffer_pool) {
    GST_ERROR_OBJECT (thiz, "Failed to ensure the srcpad buffer pool");
    return FALSE;
  }

  return TRUE;
}
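Note: the buffer_duration computed above is the standard fps-to-duration conversion; gst_util_uint64_scale (GST_SECOND, fps_d, fps_n) performs the multiply-then-divide without 64-bit overflow. A minimal sketch for NTSC 30000/1001 fps:

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstClockTime duration;

  gst_init (&argc, &argv);

  /* one frame at 30000/1001 fps: 33366666 ns */
  duration = gst_util_uint64_scale (GST_SECOND, 1001, 30000);
  g_print ("frame duration: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (duration));
  return 0;
}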
Example #28
static gboolean
gst_pngenc_setcaps (GstPngEnc * pngenc, GstCaps * caps)
{
    int fps_n, fps_d;
    GstCaps *pcaps;
    gboolean ret;
    GstVideoInfo info;

    ret = gst_video_info_from_caps (&info, caps);

    if (G_UNLIKELY (!ret))
        goto done;

    pngenc->info = info;

    switch (GST_VIDEO_INFO_FORMAT (&info)) {
    case GST_VIDEO_FORMAT_RGBA:
        pngenc->png_color_type = PNG_COLOR_TYPE_RGBA;
        pngenc->depth = 8;
        break;
    case GST_VIDEO_FORMAT_RGB:
        pngenc->png_color_type = PNG_COLOR_TYPE_RGB;
        pngenc->depth = 8;
        break;
    case GST_VIDEO_FORMAT_GRAY8:
        pngenc->png_color_type = PNG_COLOR_TYPE_GRAY;
        pngenc->depth = 8;
        break;
    case GST_VIDEO_FORMAT_GRAY16_BE:
        pngenc->png_color_type = PNG_COLOR_TYPE_GRAY;
        pngenc->depth = 16;
        break;
    default:
        ret = FALSE;
        goto done;
    }

    pngenc->width = GST_VIDEO_INFO_WIDTH (&info);
    pngenc->height = GST_VIDEO_INFO_HEIGHT (&info);
    fps_n = GST_VIDEO_INFO_FPS_N (&info);
    fps_d = GST_VIDEO_INFO_FPS_D (&info);

    if (G_UNLIKELY (pngenc->width < 16 || pngenc->width > 1000000 ||
                    pngenc->height < 16 || pngenc->height > 1000000)) {
        ret = FALSE;
        goto done;
    }

    pcaps = gst_caps_new_simple ("image/png",
                                 "width", G_TYPE_INT, pngenc->width,
                                 "height", G_TYPE_INT, pngenc->height,
                                 "framerate", GST_TYPE_FRACTION, fps_n, fps_d, NULL);

    ret = gst_pad_set_caps (pngenc->srcpad, pcaps);

    gst_caps_unref (pcaps);

    /* Fall-through. */
done:
    if (G_UNLIKELY (!ret)) {
        pngenc->width = 0;
        pngenc->height = 0;
    }

    return ret;
}
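Note: the 16..1000000 bounds above are what this handler will negotiate, so the smallest usable frame is 16x16. A hedged sketch that exercises the negotiation end-to-end (assumes pngenc and videotestsrc are present in the installed plugin set):

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstElement *pipeline;
  GstBus *bus;
  GstMessage *msg;
  GError *err = NULL;

  gst_init (&argc, &argv);

  /* 16x16 is the smallest frame the setcaps handler above accepts */
  pipeline = gst_parse_launch ("videotestsrc num-buffers=1 ! "
      "video/x-raw,format=RGB,width=16,height=16 ! pngenc ! "
      "filesink location=frame.png", &err);
  if (!pipeline) {
    g_printerr ("parse error: %s\n", err->message);
    g_clear_error (&err);
    return 1;
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
  if (msg)
    gst_message_unref (msg);
  gst_object_unref (bus);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}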
Example #29
static gboolean
gst_msdkvpp_initialize (GstMsdkVPP * thiz)
{
  mfxSession session;
  mfxStatus status;
  mfxFrameAllocRequest request[2];

  if (!thiz->context) {
    GST_WARNING_OBJECT (thiz, "No MSDK Context");
    return FALSE;
  }

  GST_OBJECT_LOCK (thiz);
  session = gst_msdk_context_get_session (thiz->context);

  if (thiz->use_video_memory) {
    gst_msdk_set_frame_allocator (thiz->context);
    thiz->param.IOPattern =
        MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
  } else {
    thiz->param.IOPattern =
        MFX_IOPATTERN_IN_SYSTEM_MEMORY | MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
  }

  /* update input video attributes */
  gst_msdk_set_mfx_frame_info_from_video_info (&thiz->param.vpp.In,
      &thiz->sinkpad_info);

  /* update output video attributes, only CSC and Scaling are supported for now */
  gst_msdk_set_mfx_frame_info_from_video_info (&thiz->param.vpp.Out,
      &thiz->srcpad_info);

  /* use MSDK frame rate control if there is a mismatch between input and output fps */
  if (GST_VIDEO_INFO_FPS_N (&thiz->srcpad_info) &&
      (GST_VIDEO_INFO_FPS_N (&thiz->sinkpad_info) !=
          GST_VIDEO_INFO_FPS_N (&thiz->srcpad_info)
          || GST_VIDEO_INFO_FPS_D (&thiz->sinkpad_info) !=
          GST_VIDEO_INFO_FPS_D (&thiz->srcpad_info))) {
    thiz->flags |= GST_MSDK_FLAG_FRC;
    /* So far this is the only algorithm that works reasonably well */
    thiz->frc_algm = MFX_FRCALGM_PRESERVE_TIMESTAMP;
  }

  /* work-around to avoid zero fps in msdk structure */
  if (!thiz->param.vpp.In.FrameRateExtN)
    thiz->param.vpp.In.FrameRateExtN = 30;
  if (!thiz->param.vpp.Out.FrameRateExtN)
    thiz->param.vpp.Out.FrameRateExtN = thiz->param.vpp.In.FrameRateExtN;

  /* set vpp out picstruct as progressive if deinterlacing enabled */
  if (thiz->flags & GST_MSDK_FLAG_DEINTERLACE)
    thiz->param.vpp.Out.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;

  /* Enable the required filters */
  ensure_filters (thiz);

  /* Add extended buffers */
  if (thiz->num_extra_params) {
    thiz->param.NumExtParam = thiz->num_extra_params;
    thiz->param.ExtParam = thiz->extra_params;
  }

  /* validate parameters and allow the Media SDK to make adjustments */
  status = MFXVideoVPP_Query (session, &thiz->param, &thiz->param);
  if (status < MFX_ERR_NONE) {
    GST_ERROR_OBJECT (thiz, "Video VPP Query failed (%s)",
        msdk_status_to_string (status));
    goto no_vpp;
  } else if (status > MFX_ERR_NONE) {
    GST_WARNING_OBJECT (thiz, "Video VPP Query returned: %s",
        msdk_status_to_string (status));
  }

  status = MFXVideoVPP_QueryIOSurf (session, &thiz->param, request);
  if (status < MFX_ERR_NONE) {
    GST_ERROR_OBJECT (thiz, "VPP Query IO surfaces failed (%s)",
        msdk_status_to_string (status));
    goto no_vpp;
  } else if (status > MFX_ERR_NONE) {
    GST_WARNING_OBJECT (thiz, "VPP Query IO surfaces returned: %s",
        msdk_status_to_string (status));
  }

  if (thiz->use_video_memory) {
    /* Input surface pool pre-allocation */
    request[0].Type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
    if (thiz->use_sinkpad_dmabuf)
      request[0].Type |= MFX_MEMTYPE_EXPORT_FRAME;
    gst_msdk_frame_alloc (thiz->context, &(request[0]), &thiz->in_alloc_resp);

    /* Output surface pool pre-allocation */
    request[1].Type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
    if (thiz->use_srcpad_dmabuf)
      request[1].Type |= MFX_MEMTYPE_EXPORT_FRAME;
    gst_msdk_frame_alloc (thiz->context, &(request[1]), &thiz->out_alloc_resp);
  }

  thiz->in_num_surfaces = request[0].NumFrameSuggested;
  thiz->out_num_surfaces = request[1].NumFrameSuggested;

  status = MFXVideoVPP_Init (session, &thiz->param);
  if (status < MFX_ERR_NONE) {
    GST_ERROR_OBJECT (thiz, "Init failed (%s)", msdk_status_to_string (status));
    goto no_vpp;
  } else if (status > MFX_ERR_NONE) {
    GST_WARNING_OBJECT (thiz, "Init returned: %s",
        msdk_status_to_string (status));
  }

  thiz->initialized = TRUE;
  GST_OBJECT_UNLOCK (thiz);
  return TRUE;

no_vpp:
  GST_OBJECT_UNLOCK (thiz);
  if (thiz->context)
    gst_object_replace ((GstObject **) & thiz->context, NULL);
  return FALSE;
}
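Note: every MSDK call above follows the same status convention: status < MFX_ERR_NONE is a hard failure, while status > MFX_ERR_NONE is a warning the operation survived. A hedged helper sketch capturing that pattern (check_mfx_status is a hypothetical name; msdk_status_to_string is the project's own helper):

/* Hypothetical wrapper around the MFX status convention used above:
 * negative statuses are errors, positive ones mere warnings. */
static gboolean
check_mfx_status (GstObject * obj, mfxStatus status, const gchar * op)
{
  if (status < MFX_ERR_NONE) {
    GST_ERROR_OBJECT (obj, "%s failed (%s)", op,
        msdk_status_to_string (status));
    return FALSE;
  }
  if (status > MFX_ERR_NONE)
    GST_WARNING_OBJECT (obj, "%s returned: %s", op,
        msdk_status_to_string (status));
  return TRUE;
}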
Example #30
File: base_enc.c Project: Radbug/gst-fsl
static gboolean gst_fsl_vpu_base_enc_set_format(GstVideoEncoder *encoder, GstVideoCodecState *state)
{
	VpuEncRetCode ret;
	GstVideoCodecState *output_state;
	GstFslVpuBaseEncClass *klass;
	GstFslVpuBaseEnc *vpu_base_enc;
	
	vpu_base_enc = GST_FSL_VPU_BASE_ENC(encoder);
	klass = GST_FSL_VPU_BASE_ENC_CLASS(G_OBJECT_GET_CLASS(vpu_base_enc));

	g_assert(klass->set_open_params != NULL);
	g_assert(klass->get_output_caps != NULL);

	/* Close old encoder instance */
	gst_fsl_vpu_base_enc_close_encoder(vpu_base_enc);

	/* Clean up existing framebuffers structure;
	 * if some previous and still existing buffer pools depend on this framebuffers
	 * structure, they will extend its lifetime, since they ref'd it
	 */
	if (vpu_base_enc->framebuffers != NULL)
	{
		gst_object_unref(vpu_base_enc->framebuffers);
		vpu_base_enc->framebuffers = NULL;
	}

	if (vpu_base_enc->output_phys_buffer != NULL)
	{
		gst_allocator_free(gst_fsl_vpu_enc_allocator_obtain(), (GstMemory *)(vpu_base_enc->output_phys_buffer));
		vpu_base_enc->output_phys_buffer = NULL;
	}

	memset(&(vpu_base_enc->open_param), 0, sizeof(VpuEncOpenParam));

	/* These params are usually not set by derived classes */
	vpu_base_enc->open_param.nPicWidth = GST_VIDEO_INFO_WIDTH(&(state->info));
	vpu_base_enc->open_param.nPicHeight = GST_VIDEO_INFO_HEIGHT(&(state->info));
	vpu_base_enc->open_param.nFrameRate = (GST_VIDEO_INFO_FPS_N(&(state->info)) & 0xffffUL) | (((GST_VIDEO_INFO_FPS_D(&(state->info)) - 1) & 0xffffUL) << 16);
	vpu_base_enc->open_param.sMirror = VPU_ENC_MIRDIR_NONE; /* don't use VPU rotation (IPU has better performance) */
	vpu_base_enc->open_param.nBitRate = vpu_base_enc->bitrate;
	vpu_base_enc->open_param.nGOPSize = vpu_base_enc->gop_size;
	vpu_base_enc->open_param.nUserGamma = (int)(32768 * vpu_base_enc->qp_smoothing);
	vpu_base_enc->open_param.nAvcIntra16x16OnlyModeEnable = vpu_base_enc->intra_16x16_only ? 1 : 0;
	vpu_base_enc->open_param.nRcIntervalMode = 1;
	/* These params are defaults, and are often overwritten by derived classes */
	vpu_base_enc->open_param.nUserQpMax = -1;
	vpu_base_enc->open_param.nUserQpMin = -1;
	vpu_base_enc->open_param.nRcIntraQp = -1;

	GST_DEBUG_OBJECT(vpu_base_enc, "setting bitrate to %u kbps and GOP size to %u", vpu_base_enc->open_param.nBitRate, vpu_base_enc->open_param.nGOPSize);

	/* Give the derived class a chance to set params */
	if (!klass->set_open_params(vpu_base_enc, &(vpu_base_enc->open_param)))
	{
		GST_ERROR_OBJECT(vpu_base_enc, "derived class could not set open params");
		return FALSE;
	}

	/* The actual initialization; requires bitstream information (such as the codec type), which
	 * is determined by the fill_param_set call before */
	ret = VPU_EncOpen(&(vpu_base_enc->handle), &(vpu_base_enc->mem_info), &(vpu_base_enc->open_param));
	if (ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "opening new VPU handle failed: %s", gst_fsl_vpu_strerror(ret));
		return FALSE;
	}

	vpu_base_enc->vpu_inst_opened = TRUE;

	/* configure AFTER setting vpu_inst_opened to TRUE, to make sure that in case of
	   config failure the VPU handle is closed in the finalizer */

	ret = VPU_EncConfig(vpu_base_enc->handle, VPU_ENC_CONF_NONE, NULL);
	if (ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "could not apply default configuration: %s", gst_fsl_vpu_strerror(ret));
		return FALSE;
	}

	ret = VPU_EncGetInitialInfo(vpu_base_enc->handle, &(vpu_base_enc->init_info));
	if (ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "retrieving init info failed: %s", gst_fsl_vpu_strerror(ret));
		return FALSE;
	}

	/* Framebuffers are created in handle_frame(), to make sure the actual stride is used */

	/* Set the output state, using caps defined by the derived class */
	output_state = gst_video_encoder_set_output_state(
		encoder,
		klass->get_output_caps(vpu_base_enc),
		state
	);
	gst_video_codec_state_unref(output_state);

	vpu_base_enc->video_info = state->info;

	return TRUE;
}
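Note: the nFrameRate assignment above packs the framerate fraction into one 32-bit value: the numerator in the low 16 bits, the denominator minus one in the high 16 bits. A round-trip sketch (the macro names are hypothetical, for illustration only):

#include <glib.h>

/* Hypothetical helpers mirroring the packing in set_format above:
 * low 16 bits = fps numerator, high 16 bits = denominator - 1 */
#define VPU_PACK_FRAMERATE(n, d) \
    ((((guint32) (n)) & 0xffffUL) | (((((guint32) (d)) - 1) & 0xffffUL) << 16))
#define VPU_FRAMERATE_N(v) ((v) & 0xffffUL)
#define VPU_FRAMERATE_D(v) ((((v) >> 16) & 0xffffUL) + 1)

int
main (void)
{
  guint32 packed = VPU_PACK_FRAMERATE (30000, 1001);    /* NTSC */

  g_print ("packed = 0x%08x -> %u/%u\n", packed,
      (guint) VPU_FRAMERATE_N (packed), (guint) VPU_FRAMERATE_D (packed));
  return 0;
}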