Example #1
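videoconvert's GstBaseTransform fixate vfunc: intersect othercaps with caps, fall back to othercaps if the intersection is empty, pick a concrete format, then let gst_caps_fixate() settle any remaining fields.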
static GstCaps *
gst_video_convert_fixate_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
  GstCaps *result;

  GST_DEBUG_OBJECT (trans, "trying to fixate othercaps %" GST_PTR_FORMAT
      " based on caps %" GST_PTR_FORMAT, othercaps, caps);

  result = gst_caps_intersect (othercaps, caps);
  if (gst_caps_is_empty (result)) {
    gst_caps_unref (result);
    result = othercaps;
  } else {
    gst_caps_unref (othercaps);
  }

  GST_DEBUG_OBJECT (trans, "now fixating %" GST_PTR_FORMAT, result);

  result = gst_caps_make_writable (result);
  gst_video_convert_fixate_format (trans, caps, result);

  /* fixate remaining fields */
  result = gst_caps_fixate (result);

  return result;
}
Example #2
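msdkvpp fixates the caps proposed for the opposite pad; if that pad's peer supports dmabuf, the result is tagged with the dmabuf caps feature and the choice is recorded per pad.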
static GstCaps *
gst_msdkvpp_fixate_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
  GstMsdkVPP *thiz = GST_MSDKVPP (trans);
  GstCaps *result = NULL;
  gboolean *use_dmabuf;

  if (direction == GST_PAD_SRC) {
    /* othercaps are the proposed sink-pad caps here; fixate a new ref of
     * them so the unconditional unref of othercaps below stays balanced
     * (fixating the still-NULL result would be a crash) */
    result = gst_caps_fixate (gst_caps_ref (othercaps));
    use_dmabuf = &thiz->use_sinkpad_dmabuf;
  } else {
    result = gst_msdkvpp_fixate_srccaps (thiz, caps, othercaps);
    use_dmabuf = &thiz->use_srcpad_dmabuf;
  }

  GST_DEBUG_OBJECT (trans, "fixated to %" GST_PTR_FORMAT, result);
  gst_caps_unref (othercaps);

  if (pad_can_dmabuf (thiz,
          direction == GST_PAD_SRC ? GST_PAD_SINK : GST_PAD_SRC, result)) {
    gst_caps_set_features (result, 0,
        gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DMABUF, NULL));
    *use_dmabuf = TRUE;
  }

  return result;
}
Example #3
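ivtc overrides the output framerate while fixating in the sink-to-src direction: 30000/1001 input maps to 24000/1001, anything else to 24/1, before the final gst_caps_fixate().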
static GstCaps *
gst_ivtc_fixate_caps (GstBaseTransform * trans, GstPadDirection direction,
                      GstCaps * caps, GstCaps * othercaps)
{
    GstCaps *result;

    GST_DEBUG_OBJECT (trans, "fixating caps %" GST_PTR_FORMAT, othercaps);

    result = gst_caps_make_writable (othercaps);
    if (direction == GST_PAD_SINK) {
        GstVideoInfo info;
        if (gst_video_info_from_caps (&info, caps)) {
            /* Smarter decision */
            GST_DEBUG_OBJECT (trans, "Input framerate is %d/%d", info.fps_n,
                              info.fps_d);
            if (info.fps_n == 30000 && info.fps_d == 1001)
                gst_caps_set_simple (result, "framerate", GST_TYPE_FRACTION, 24000,
                                     1001, NULL);
            else
                gst_caps_set_simple (result, "framerate", GST_TYPE_FRACTION, 24, 1,
                                     NULL);
        } else {
            gst_caps_set_simple (result, "framerate", GST_TYPE_FRACTION, 24, 1, NULL);
        }
    }

    result = gst_caps_fixate (result);

    return result;
}
Example #4
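The mfc decoder truncates and fixates the downstream-allowed caps to choose an output format (defaulting to NV12), then sets the output state and chains up to the parent negotiate.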
static gboolean
gst_mfc_dec_negotiate (GstVideoDecoder * decoder)
{
  GstMFCDec *self = GST_MFC_DEC (decoder);
  GstVideoCodecState *state;
  GstCaps *allowed_caps;
  GstVideoFormat format = GST_VIDEO_FORMAT_NV12;

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_DECODER_SRC_PAD (self));
  allowed_caps = gst_caps_truncate (allowed_caps);
  allowed_caps = gst_caps_fixate (allowed_caps);
  if (!gst_caps_is_empty (allowed_caps)) {
    const gchar *format_str;
    GstStructure *s = gst_caps_get_structure (allowed_caps, 0);

    format_str = gst_structure_get_string (s, "format");
    if (format_str)
      format = gst_video_format_from_string (format_str);
  }
  gst_caps_unref (allowed_caps);

  self->format = format;
  state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self),
      self->format, self->crop_width, self->crop_height, self->input_state);

  gst_video_codec_state_unref (state);

  return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
}
Example #5
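OpenIMAJ's capture helper builds or updates appsink filter caps one property at a time, fixates them, and installs them on the sink.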
/*!
 * \brief OpenIMAJCapGStreamer::setFilter
 * \param prop the property name
 * \param type glib property type
 * \param v1 the value
 * \param v2 second value, if the property type requires one; otherwise ignored
 * Filter the output formats by setting appsink caps properties
 */
void OpenIMAJCapGStreamer::setFilter(const char *prop, GType type, int v1, int v2)
{
    if(!caps || !( GST_IS_CAPS (caps) ))
    {
        if(type == G_TYPE_INT)
        {
            caps = gst_caps_new_simple("video/x-raw","format",G_TYPE_STRING,"BGR", prop, type, v1, NULL);
        }
        else
        {
            caps = gst_caps_new_simple("video/x-raw","format",G_TYPE_STRING,"BGR", prop, type, v1, v2, NULL);
        }
    }
    else
    {
        if (! gst_caps_is_writable(caps))
            caps = gst_caps_make_writable (caps);
        if(type == G_TYPE_INT){
            gst_caps_set_simple(caps, prop, type, v1, NULL);
        }else{
            gst_caps_set_simple(caps, prop, type, v1, v2, NULL);
        }
    }
    
    caps = gst_caps_fixate(caps);
    
    gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
}
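A hypothetical call site (the instance name and values are illustrative, not taken from the OpenIMAJ sources); for single-valued property types the trailing v2 argument is unused:

    cap.setFilter("width", G_TYPE_INT, 640, 0);
    cap.setFilter("height", G_TYPE_INT, 480, 0);
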
Example #6
static gboolean
gst_msdkh264enc_set_format (GstMsdkEnc * encoder)
{
  GstMsdkH264Enc *thiz = GST_MSDKH264ENC (encoder);
  GstCaps *template_caps;
  GstCaps *allowed_caps = NULL;

  thiz->profile = 0;
  thiz->level = 0;

  template_caps = gst_static_pad_template_get_caps (&src_factory);
  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));

  /* If downstream has ANY caps let encoder decide profile and level */
  if (allowed_caps == template_caps) {
    GST_INFO_OBJECT (thiz,
        "downstream has ANY caps, profile/level set to auto");
  } else if (allowed_caps) {
    GstStructure *s;
    const gchar *profile;
    const gchar *level;

    if (gst_caps_is_empty (allowed_caps)) {
      gst_caps_unref (allowed_caps);
      gst_caps_unref (template_caps);
      return FALSE;
    }

    allowed_caps = gst_caps_make_writable (allowed_caps);
    allowed_caps = gst_caps_fixate (allowed_caps);
    s = gst_caps_get_structure (allowed_caps, 0);

    profile = gst_structure_get_string (s, "profile");
    if (profile) {
      if (!strcmp (profile, "high")) {
        thiz->profile = MFX_PROFILE_AVC_HIGH;
      } else if (!strcmp (profile, "main")) {
        thiz->profile = MFX_PROFILE_AVC_MAIN;
      } else if (!strcmp (profile, "baseline")) {
        thiz->profile = MFX_PROFILE_AVC_BASELINE;
      } else if (!strcmp (profile, "constrained-baseline")) {
        thiz->profile = MFX_PROFILE_AVC_CONSTRAINED_BASELINE;
      } else {
        g_assert_not_reached ();
      }
    }

    level = gst_structure_get_string (s, "level");
    if (level) {
      thiz->level = gst_codec_utils_h264_get_level_idc (level);
    }

    gst_caps_unref (allowed_caps);
  }

  gst_caps_unref (template_caps);

  return TRUE;
}
Example #7
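Same pattern as the H.264 variant in example #6: fixate the downstream-allowed caps and map the "profile" string ("0" through "3") to the matching MFX VP8 profile.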
static gboolean
gst_msdkvp8enc_set_format (GstMsdkEnc * encoder)
{
  GstMsdkVP8Enc *thiz = GST_MSDKVP8ENC (encoder);
  GstCaps *template_caps;
  GstCaps *allowed_caps = NULL;

  thiz->profile = 0;

  template_caps = gst_static_pad_template_get_caps (&src_factory);
  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));

  /* If downstream has ANY caps let encoder decide profile and level */
  if (allowed_caps == template_caps) {
    GST_INFO_OBJECT (thiz,
        "downstream has ANY caps, profile/level set to auto");
  } else if (allowed_caps) {
    GstStructure *s;
    const gchar *profile;

    if (gst_caps_is_empty (allowed_caps)) {
      gst_caps_unref (allowed_caps);
      gst_caps_unref (template_caps);
      return FALSE;
    }

    allowed_caps = gst_caps_make_writable (allowed_caps);
    allowed_caps = gst_caps_fixate (allowed_caps);
    s = gst_caps_get_structure (allowed_caps, 0);

    profile = gst_structure_get_string (s, "profile");
    if (profile) {
      if (!strcmp (profile, "3")) {
        thiz->profile = MFX_PROFILE_VP8_3;
      } else if (!strcmp (profile, "2")) {
        thiz->profile = MFX_PROFILE_VP8_2;
      } else if (!strcmp (profile, "1")) {
        thiz->profile = MFX_PROFILE_VP8_1;
      } else if (!strcmp (profile, "0")) {
        thiz->profile = MFX_PROFILE_VP8_0;
      } else {
        g_assert_not_reached ();
      }
    }

    gst_caps_unref (allowed_caps);
  }

  gst_caps_unref (template_caps);

  return TRUE;
}
Example #8
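A GstBaseSrc fixate vfunc that steers width, height, and framerate toward their maximum values in every structure before handing the rest to gst_caps_fixate().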
static GstCaps *
gst_ks_video_src_fixate (GstBaseSrc * basesrc, GstCaps * caps)
{
    GstStructure *structure;
    GstCaps *fixated_caps;
    gint i;

    fixated_caps = gst_caps_make_writable (caps);

    for (i = 0; i < gst_caps_get_size (fixated_caps); ++i) {
        structure = gst_caps_get_structure (fixated_caps, i);
        gst_structure_fixate_field_nearest_int (structure, "width", G_MAXINT);
        gst_structure_fixate_field_nearest_int (structure, "height", G_MAXINT);
        gst_structure_fixate_field_nearest_fraction (structure, "framerate",
                G_MAXINT, 1);
    }

    return gst_caps_fixate (fixated_caps);
}
Example #9
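OpenCV's variant of the setFilter helper from example #5, with preprocessor guards covering both GStreamer 0.10 and 1.x.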
/*!
 * \brief CvCapture_GStreamer::setFilter
 * \param prop the property name
 * \param type glib property type
 * \param v1 the value
 * \param v2 second value, if the property type requires one; otherwise ignored
 * Filter the output formats by setting appsink caps properties
 */
void CvCapture_GStreamer::setFilter(const char *prop, int type, int v1, int v2)
{
    //printf("GStreamer: setFilter \n");
    if(!caps || !( GST_IS_CAPS (caps) ))
    {
        if(type == G_TYPE_INT)
        {
#if GST_VERSION_MAJOR == 0
            caps = gst_caps_new_simple("video/x-raw-rgb", prop, type, v1, NULL);
#else
            caps = gst_caps_new_simple("video/x-raw","format",G_TYPE_STRING,"BGR", prop, type, v1, NULL);
#endif
        }
        else
        {
#if GST_VERSION_MAJOR == 0
            caps = gst_caps_new_simple("video/x-raw-rgb", prop, type, v1, v2, NULL);
#else
            caps = gst_caps_new_simple("video/x-raw","format",G_TYPE_STRING,"BGR", prop, type, v1, v2, NULL);
#endif
        }
    }
    else
    {
#if GST_VERSION_MAJOR > 0
        if (! gst_caps_is_writable(caps))
            caps = gst_caps_make_writable (caps);
#endif
        if(type == G_TYPE_INT){
            gst_caps_set_simple(caps, prop, type, v1, NULL);
        }else{
            gst_caps_set_simple(caps, prop, type, v1, v2, NULL);
        }
    }

#if GST_VERSION_MAJOR > 0
    caps = gst_caps_fixate(caps);
#endif

    gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
    //printf("filtering with %s\n", gst_caps_to_string(caps));
}
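A hypothetical call site exercising the two-value path; GST_TYPE_FRACTION consumes both v1 and v2 (instance name and values are illustrative):

    capture.setFilter("framerate", GST_TYPE_FRACTION, 30, 1);
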
Example #10
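A minimal intersect-or-fall-back fixate vfunc, the same shape as example #1 without the format-selection step.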
static GstCaps *
gst_yuv_to_rgb_fixate_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
  GstCaps *result;

  GST_DEBUG_OBJECT (trans, "fixating caps %" GST_PTR_FORMAT, othercaps);

  result = gst_caps_intersect (othercaps, caps);
  if (gst_caps_is_empty (result)) {
    gst_caps_unref (result);
    result = othercaps;
  } else {
    gst_caps_unref (othercaps);
  }

  /* fixate remaining fields */
  result = gst_caps_fixate (result);

  return result;
}
Example #11
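Fixates the output framerate to the value nearest the (already fixed) input framerate, then lets gst_caps_fixate() handle whatever is left.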
static GstCaps *
fs_videoanyrate_fixate_caps (GstBaseTransform * base,
                             GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
    GstStructure *ins, *outs;

    const GValue *from_fr, *to_fr;

    g_return_val_if_fail (gst_caps_is_fixed (caps), othercaps);

    othercaps = gst_caps_make_writable (othercaps);

    GST_DEBUG_OBJECT (base, "trying to fixate othercaps %" GST_PTR_FORMAT
                      " based on caps %" GST_PTR_FORMAT, othercaps, caps);

    ins = gst_caps_get_structure (caps, 0);
    outs = gst_caps_get_structure (othercaps, 0);

    from_fr = gst_structure_get_value (ins, "framerate");
    to_fr = gst_structure_get_value (outs, "framerate");

    /* we have both framerates but they might not be fixated */
    if (from_fr && to_fr && !gst_value_is_fixed (to_fr)) {
        gint from_fr_n, from_fr_d;

        /* from_fr should be fixed */
        g_return_val_if_fail (gst_value_is_fixed (from_fr), othercaps);

        from_fr_n = gst_value_get_fraction_numerator (from_fr);
        from_fr_d = gst_value_get_fraction_denominator (from_fr);

        GST_DEBUG_OBJECT (base, "fixating to_fr nearest to %d/%d",
                          from_fr_n, from_fr_d);
        gst_structure_fixate_field_nearest_fraction (outs, "framerate",
                from_fr_n, from_fr_d);
    }

    return gst_caps_fixate (othercaps);
}
Example #12
static GstCaps *
gst_v4l2_transform_fixate_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
  GstCaps *result;

  GST_DEBUG_OBJECT (trans, "trying to fixate othercaps %" GST_PTR_FORMAT
      " based on caps %" GST_PTR_FORMAT, othercaps, caps);

  result = gst_caps_intersect (othercaps, caps);
  if (gst_caps_is_empty (result)) {
    gst_caps_unref (result);
    result = othercaps;
  } else {
    gst_caps_unref (othercaps);
  }

  GST_DEBUG_OBJECT (trans, "now fixating %" GST_PTR_FORMAT, result);

  result = gst_caps_fixate (result);

  return result;
}
Example #13
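Builds an appsrc -> color-convert -> videoflip -> encoder -> avimux -> filesink recording pipeline; on GStreamer 1.x the appsrc caps are parsed from a string and fixated before being applied.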
bool nvxio::GStreamerVideoRenderImpl::InitializeGStreamerPipeline()
{
    std::ostringstream stream;

    pipeline = GST_PIPELINE(gst_pipeline_new(NULL));
    if (pipeline == NULL)
    {
        NVXIO_PRINT("Cannot create Gstreamer pipeline");
        return false;
    }

    bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));

    // create appsrc
    GstElement * appsrcelem = gst_element_factory_make("appsrc", NULL);
    if (appsrcelem == NULL)
    {
        NVXIO_PRINT("Cannot create appsrc");
        FinalizeGStreamerPipeline();

        return false;
    }

    g_object_set(G_OBJECT(appsrcelem), "is-live", 0, NULL);
    g_object_set(G_OBJECT(appsrcelem), "num-buffers", -1, NULL);
    g_object_set(G_OBJECT(appsrcelem), "emit-signals", 0, NULL);
    g_object_set(G_OBJECT(appsrcelem), "block", 1, NULL);
    g_object_set(G_OBJECT(appsrcelem), "size", static_cast<guint64>(wndHeight_ * wndWidth_ * 4), NULL);
    g_object_set(G_OBJECT(appsrcelem), "format", GST_FORMAT_TIME, NULL);
    g_object_set(G_OBJECT(appsrcelem), "stream-type", GST_APP_STREAM_TYPE_STREAM, NULL);

    appsrc = GST_APP_SRC_CAST(appsrcelem);
#if GST_VERSION_MAJOR == 0
    GstCaps * caps = gst_caps_new_simple("video/x-raw-rgb",
                                         "bpp", G_TYPE_INT, 32,
                                         "endianness", G_TYPE_INT, 4321,
                                         "red_mask", G_TYPE_INT, -16777216,
                                         "green_mask", G_TYPE_INT, 16711680,
                                         "blue_mask", G_TYPE_INT, 65280,
                                         "alpha_mask", G_TYPE_INT, 255,
                                         "width", G_TYPE_INT, wndWidth_,
                                         "height", G_TYPE_INT, wndHeight_,
                                         "framerate", GST_TYPE_FRACTION, GSTREAMER_DEFAULT_FPS, 1,
                                         NULL);
    if (caps == NULL)
    {
        NVXIO_PRINT("Failed to create caps");
        FinalizeGStreamerPipeline();

        return false;
    }

#else
    // support 4 channel 8 bit data
    stream << "video/x-raw"
           << ", width=" << wndWidth_
           << ", height=" << wndHeight_
           << ", format=(string){RGBA}"
           << ", framerate=" << GSTREAMER_DEFAULT_FPS << "/1;";
    GstCaps * caps = gst_caps_from_string(stream.str().c_str());

    if (caps == NULL)
    {
        NVXIO_PRINT("Failed to create caps");
        FinalizeGStreamerPipeline();

        return false;
    }

    caps = gst_caps_fixate(caps);
#endif

    gst_app_src_set_caps(appsrc, caps);
    gst_caps_unref(caps);

    gst_bin_add(GST_BIN(pipeline), appsrcelem);

    // create color convert element
    GstElement * color = gst_element_factory_make(COLOR_ELEM, NULL);
    if (color == NULL)
    {
        NVXIO_PRINT("Cannot create " COLOR_ELEM " element");
        FinalizeGStreamerPipeline();

        return false;
    }
    gst_bin_add(GST_BIN(pipeline), color);

    // create videoflip element
    GstElement * videoflip = gst_element_factory_make("videoflip", NULL);
    if (videoflip == NULL)
    {
        NVXIO_PRINT("Cannot create videoflip element");
        FinalizeGStreamerPipeline();

        return false;
    }

    g_object_set(G_OBJECT(videoflip), "method", 5, NULL);

    gst_bin_add(GST_BIN(pipeline), videoflip);

    // create encodelem element
    GstElement * encodelem = gst_element_factory_make(ENCODE_ELEM, NULL);
    if (encodelem == NULL)
    {
        NVXIO_PRINT("Cannot create " ENCODE_ELEM " element");
        FinalizeGStreamerPipeline();

        return false;
    }

    gst_bin_add(GST_BIN(pipeline), encodelem);

    // create avimux element
    GstElement * avimux = gst_element_factory_make("avimux", NULL);
    if (avimux == NULL)
    {
        NVXIO_PRINT("Cannot create avimux element");
        FinalizeGStreamerPipeline();

        return false;
    }

    gst_bin_add(GST_BIN(pipeline), avimux);

    // create filesink element
    GstElement * filesink = gst_element_factory_make("filesink", NULL);
    if (filesink == NULL)
    {
        NVXIO_PRINT("Cannot create filesink element");
        FinalizeGStreamerPipeline();

        return false;
    }

    g_object_set(G_OBJECT(filesink), "location", windowTitle_.c_str(), NULL);
    g_object_set(G_OBJECT(filesink), "append", 0, NULL);

    gst_bin_add(GST_BIN(pipeline), filesink);


    // link elements
    if (!gst_element_link_many(appsrcelem, color, videoflip,
                               encodelem, avimux, filesink, NULL))
    {
        NVXIO_PRINT("GStreamer: cannot link appsrc -> " COLOR_ELEM
                    " -> videoflip -> " ENCODE_ELEM " -> avimux -> filesink");
        FinalizeGStreamerPipeline();

        return false;
    }

    // Force pipeline to play video as fast as possible, ignoring system clock
    gst_pipeline_use_clock(pipeline, NULL);

    num_frames = 0;

    GstStateChangeReturn status = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
    if (status == GST_STATE_CHANGE_FAILURE)
    {
        NVXIO_PRINT("GStreamer: unable to start playback");
        FinalizeGStreamerPipeline();

        return false;
    }

    return true;
}
Example #14
File: atdec.c Project: PeterXu/gst-mobile
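atdec fixates the downstream-allowed caps to pick an output sample format, pins rate and channels to the input stream's values, and configures an AudioQueue to match.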
static gboolean
gst_atdec_set_format (GstAudioDecoder * decoder, GstCaps * caps)
{
  OSStatus status;
  AudioStreamBasicDescription input_format = { 0 };
  AudioStreamBasicDescription output_format = { 0 };
  GstAudioInfo output_info = { 0 };
  AudioChannelLayout output_layout = { 0 };
  GstCaps *output_caps;
  GstATDec *atdec = GST_ATDEC (decoder);

  GST_DEBUG_OBJECT (atdec, "set_format");

  if (atdec->queue)
    gst_atdec_destroy_queue (atdec, TRUE);

  /* configure input_format from caps */
  gst_caps_to_at_format (caps, &input_format);

  /* negotiate output caps */
  output_caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (atdec));
  if (!output_caps)
    output_caps =
        gst_pad_get_pad_template_caps (GST_AUDIO_DECODER_SRC_PAD (atdec));
  output_caps = gst_caps_fixate (output_caps);

  gst_caps_set_simple (output_caps,
      "rate", G_TYPE_INT, (int) input_format.mSampleRate,
      "channels", G_TYPE_INT, input_format.mChannelsPerFrame, NULL);

  /* configure output_format from caps */
  gst_caps_to_at_format (output_caps, &output_format);

  /* set the format we want to negotiate downstream */
  gst_audio_info_from_caps (&output_info, output_caps);
  gst_audio_info_set_format (&output_info,
      output_format.mFormatFlags & kLinearPCMFormatFlagIsSignedInteger ?
      GST_AUDIO_FORMAT_S16LE : GST_AUDIO_FORMAT_F32LE,
      output_format.mSampleRate, output_format.mChannelsPerFrame, NULL);
  gst_audio_decoder_set_output_format (decoder, &output_info);
  gst_caps_unref (output_caps);

  status = AudioQueueNewOutput (&input_format, gst_atdec_buffer_emptied,
      atdec, NULL, NULL, 0, &atdec->queue);
  if (status)
    goto create_queue_error;

  /* FIXME: figure out how to map this properly */
  if (output_format.mChannelsPerFrame == 1)
    output_layout.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
  else
    output_layout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

  status = AudioQueueSetOfflineRenderFormat (atdec->queue,
      &output_format, &output_layout);
  if (status)
    goto set_format_error;

  status = AudioQueueStart (atdec->queue, NULL);
  if (status)
    goto start_error;

  return TRUE;

create_queue_error:
  GST_ELEMENT_ERROR (atdec, STREAM, FORMAT, (NULL),
      ("AudioQueueNewOutput returned error: %d", status));
  return FALSE;

set_format_error:
  GST_ELEMENT_ERROR (atdec, STREAM, FORMAT, (NULL),
      ("AudioQueueSetOfflineRenderFormat returned error: %d", status));
  gst_atdec_destroy_queue (atdec, FALSE);
  return FALSE;

start_error:
  GST_ELEMENT_ERROR (atdec, STREAM, FORMAT, (NULL),
      ("AudioQueueStart returned error: %d", status));
  gst_atdec_destroy_queue (atdec, FALSE);
  return FALSE;
}
Example #15
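audiomixer: for mono input the sink caps are intersected with any filter caps, queried against downstream, and the reply fixated; the first caps accepted this way become the caps for every sink pad.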
/* the first caps we receive on any of the sinkpads will define the caps for all
 * the other sinkpads because we can only mix streams with the same caps.
 */
static gboolean
gst_audiomixer_setcaps (GstAudioMixer * audiomixer, GstPad * pad,
    GstCaps * orig_caps)
{
  GstAggregator *agg = GST_AGGREGATOR (audiomixer);
  GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (audiomixer);
  GstCaps *caps;
  GstAudioInfo info;
  GstStructure *s;
  gint channels = 0;

  caps = gst_caps_copy (orig_caps);

  s = gst_caps_get_structure (caps, 0);
  if (gst_structure_get_int (s, "channels", &channels))
    if (channels <= 2)
      gst_structure_remove_field (s, "channel-mask");

  if (!gst_audio_info_from_caps (&info, caps))
    goto invalid_format;

  if (channels == 1) {
    GstCaps *filter;
    GstCaps *downstream_caps;

    if (audiomixer->filter_caps)
      filter = gst_caps_intersect_full (caps, audiomixer->filter_caps,
          GST_CAPS_INTERSECT_FIRST);
    else
      filter = gst_caps_ref (caps);

    downstream_caps = gst_pad_peer_query_caps (agg->srcpad, filter);
    gst_caps_unref (filter);

    if (downstream_caps) {
      gst_caps_unref (caps);
      caps = downstream_caps;

      if (gst_caps_is_empty (caps)) {
        gst_caps_unref (caps);
        return FALSE;
      }
      caps = gst_caps_fixate (caps);
    }
  }

  GST_OBJECT_LOCK (audiomixer);
  /* don't allow reconfiguration for now; there's still a race between the
   * different upstream threads doing query_caps + accept_caps + sending
   * (possibly different) CAPS events, but there's not much we can do about
   * that, upstream needs to deal with it. */
  if (aagg->current_caps != NULL) {
    if (gst_audio_info_is_equal (&info, &aagg->info)) {
      GST_OBJECT_UNLOCK (audiomixer);
      gst_caps_unref (caps);
      gst_audio_aggregator_set_sink_caps (aagg, GST_AUDIO_AGGREGATOR_PAD (pad),
          orig_caps);
      return TRUE;
    } else {
      GST_DEBUG_OBJECT (pad, "got input caps %" GST_PTR_FORMAT ", but "
          "current caps are %" GST_PTR_FORMAT, caps, aagg->current_caps);
      GST_OBJECT_UNLOCK (audiomixer);
      gst_pad_push_event (pad, gst_event_new_reconfigure ());
      gst_caps_unref (caps);
      return FALSE;
    }
  } else {
    gst_caps_replace (&aagg->current_caps, caps);
    aagg->info = info;
    gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (agg));
  }
  GST_OBJECT_UNLOCK (audiomixer);

  gst_audio_aggregator_set_sink_caps (aagg, GST_AUDIO_AGGREGATOR_PAD (pad),
      orig_caps);

  GST_INFO_OBJECT (pad, "handle caps change to %" GST_PTR_FORMAT, caps);

  gst_caps_unref (caps);

  return TRUE;

  /* ERRORS */
invalid_format:
  {
    gst_caps_unref (caps);
    GST_WARNING_OBJECT (audiomixer, "invalid format set as caps");
    return FALSE;
  }
}
Example #16
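The ffmpeg audio encoder intersects the downstream-allowed caps with the caps the opened codec can produce, fixates the intersection, and uses it as the output format.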
static gboolean
gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info)
{
    GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
    GstCaps *other_caps;
    GstCaps *allowed_caps;
    GstCaps *icaps;
    gsize frame_size;
    GstFFMpegAudEncClass *oclass =
        (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);

    /* close old session */
    if (ffmpegaudenc->opened) {
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        ffmpegaudenc->opened = FALSE;
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0) {
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");
            return FALSE;
        }
    }

    /* if we set it in _getcaps we should set it also in _link */
    ffmpegaudenc->context->strict_std_compliance = ffmpegaudenc->compliance;

    /* user defined properties */
    if (ffmpegaudenc->bitrate > 0) {
        GST_INFO_OBJECT (ffmpegaudenc, "Setting avcontext to bitrate %d",
                         ffmpegaudenc->bitrate);
        ffmpegaudenc->context->bit_rate = ffmpegaudenc->bitrate;
        ffmpegaudenc->context->bit_rate_tolerance = ffmpegaudenc->bitrate;
    } else {
        GST_INFO_OBJECT (ffmpegaudenc,
                         "Using avcontext default bitrate %" G_GINT64_FORMAT,
                         (gint64) ffmpegaudenc->context->bit_rate);
    }

    /* RTP payload used for GOB production (for Asterisk) */
    if (ffmpegaudenc->rtp_payload_size) {
        ffmpegaudenc->context->rtp_payload_size = ffmpegaudenc->rtp_payload_size;
    }

    /* some other defaults */
    ffmpegaudenc->context->rc_strategy = 2;
    ffmpegaudenc->context->b_frame_strategy = 0;
    ffmpegaudenc->context->coder_type = 0;
    ffmpegaudenc->context->context_model = 0;
    ffmpegaudenc->context->scenechange_threshold = 0;

    /* fetch pix_fmt and so on */
    gst_ffmpeg_audioinfo_to_context (info, ffmpegaudenc->context);
    if (!ffmpegaudenc->context->time_base.den) {
        ffmpegaudenc->context->time_base.den = GST_AUDIO_INFO_RATE (info);
        ffmpegaudenc->context->time_base.num = 1;
        ffmpegaudenc->context->ticks_per_frame = 1;
    }

    if (ffmpegaudenc->context->channel_layout) {
        gst_ffmpeg_channel_layout_to_gst (ffmpegaudenc->context->channel_layout,
                                          ffmpegaudenc->context->channels, ffmpegaudenc->ffmpeg_layout);
        ffmpegaudenc->needs_reorder =
            (memcmp (ffmpegaudenc->ffmpeg_layout, info->position,
                     sizeof (GstAudioChannelPosition) *
                     ffmpegaudenc->context->channels) != 0);
    }

    /* some codecs support more than one format, first auto-choose one */
    GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
    allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
    if (!allowed_caps) {
        GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
        /* we need to copy because get_allowed_caps returns a ref, and
         * get_pad_template_caps doesn't */
        allowed_caps =
            gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
    }
    GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
    gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
                                  oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);

    /* open codec */
    if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
        gst_caps_unref (allowed_caps);
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
                          oclass->in_plugin->name);
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");

        if ((oclass->in_plugin->capabilities & CODEC_CAP_EXPERIMENTAL) &&
                ffmpegaudenc->compliance != GST_FFMPEG_EXPERIMENTAL) {
            GST_ELEMENT_ERROR (ffmpegaudenc, LIBRARY, SETTINGS,
                               ("Codec is experimental, but settings don't allow encoders to "
                                "produce output of experimental quality"),
                               ("This codec may not create output that is conformant to the specs "
                                "or of good quality. If you must use it anyway, set the "
                                "compliance property to experimental"));
        }
        return FALSE;
    }

    /* try to set this caps on the other side */
    other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
                 ffmpegaudenc->context, TRUE);

    if (!other_caps) {
        gst_caps_unref (allowed_caps);
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        GST_DEBUG ("Unsupported codec - no caps found");
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");
        return FALSE;
    }

    icaps = gst_caps_intersect (allowed_caps, other_caps);
    gst_caps_unref (allowed_caps);
    gst_caps_unref (other_caps);
    if (gst_caps_is_empty (icaps)) {
        gst_caps_unref (icaps);
        return FALSE;
    }
    icaps = gst_caps_fixate (icaps);

    if (!gst_audio_encoder_set_output_format (GST_AUDIO_ENCODER (ffmpegaudenc),
            icaps)) {
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        gst_caps_unref (icaps);
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");
        return FALSE;
    }
    gst_caps_unref (icaps);

    frame_size = ffmpegaudenc->context->frame_size;
    if (frame_size > 1) {
        gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
                frame_size);
        gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
                frame_size);
        gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 1);
    } else {
        gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
                0);
        gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
                0);
        gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 0);
    }

    /* Store some tags */
    {
        GstTagList *tags = gst_tag_list_new_empty ();
        const gchar *codec;

        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_NOMINAL_BITRATE,
                          (guint) ffmpegaudenc->context->bit_rate, NULL);

        if ((codec =
                    gst_ffmpeg_get_codecid_longname (ffmpegaudenc->context->codec_id)))
            gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_AUDIO_CODEC, codec,
                              NULL);

        gst_audio_encoder_merge_tags (encoder, tags, GST_TAG_MERGE_REPLACE);
        gst_tag_list_unref (tags);
    }

    /* success! */
    ffmpegaudenc->opened = TRUE;

    return TRUE;
}
Example #17
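Sets up a two-input videomixer display pipeline; each input branch gets a capsfilter whose 640x480 caps are fixated before being assigned. Linking of the branches happens elsewhere.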
bool GstShow::init_pipeline(const int  xwinid)
{
    pipeline = gst_pipeline_new ("xvoverlay");

    
    //create base pipeline elements
    videosink = gst_element_factory_make("xvimagesink", NULL);

    gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (videosink), xwinid);    
    
    mixer = gst_element_factory_make("videomixer", "mix");
    
    
    /* Manually linking the videoboxes to the mixer */
    GstPadTemplate *mixer_sink_pad_template = gst_element_class_get_pad_template(GST_ELEMENT_GET_CLASS(mixer), "sink_%u");
    
    if(mixer_sink_pad_template == NULL) {
      g_printerr("Could not get mixer pad template.\n");
      // gst_object_unref(something);
      return false;
    }    
    
    GstPad* mixerpads[2];
    
    mixerpads[0] = gst_element_request_pad(mixer, mixer_sink_pad_template, NULL, NULL);
    mixerpads[1] = gst_element_request_pad(mixer, mixer_sink_pad_template, NULL, NULL);
    
    
    g_object_set(mixerpads[0], "xpos",   0, NULL);
    g_object_set(mixerpads[0], "ypos",   0, NULL);
    g_object_set(mixerpads[0], "alpha",1.0, NULL);
    
    g_object_set(mixerpads[1], "xpos", 640, NULL);
    g_object_set(mixerpads[1], "ypos",   0, NULL);
    g_object_set(mixerpads[1], "alpha",1.0, NULL);    
    
    
    gst_object_unref(mixerpads[0]);
    gst_object_unref(mixerpads[1]);
    
    // prepare queue and scale
    
    for (int i = 0; i<2; i++)
    {
        queue[i] = gst_element_factory_make("queue", NULL);
        scale[i] = gst_element_factory_make("videoscale", NULL);
        scalefilter[i] = gst_element_factory_make("capsfilter", NULL);
    
        GstCaps *caps =  gst_caps_new_simple("video/x-raw",
            "width", G_TYPE_INT, 640,
            "height", G_TYPE_INT, 480, 
            //"format", G_TYPE_STRING, "BGR",
            NULL);
        caps = gst_caps_fixate(caps);
        
        g_object_set(G_OBJECT(scalefilter[i]), "caps", caps, NULL);
        gst_caps_unref(caps);
    }
    
    
    
    gst_bin_add_many(GST_BIN(pipeline), queue[0], queue[1], scale[0], scale[1], 
                    scalefilter[0], scalefilter[1], mixer, videosink, NULL);
    
    
    
    return true;
}
Example #18
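mfxpostproc derives src caps from fixated sink caps: the peer's caps are queried and fixated to choose the output format and size before the final caps are built.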
static GstCaps *
gst_mfxpostproc_transform_caps_impl (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps)
{
  GstMfxPostproc *const vpp = GST_MFXPOSTPROC (trans);
  GstVideoInfo vi, peer_vi;
  GstVideoFormat out_format;
  GstCaps *out_caps, *peer_caps;
  GstMfxCapsFeature feature;
  const gchar *feature_str;
  guint width, height;

  /* Generate the sink pad caps, that could be fixated afterwards */
  if (direction == GST_PAD_SRC) {
    if (!ensure_allowed_sinkpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_sinkpad_caps);
  }

  /* Generate complete set of src pad caps if non-fixated sink pad
   * caps are provided */
  if (!gst_caps_is_fixed (caps)) {
    if (!ensure_allowed_srcpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_srcpad_caps);
  }

  /* Generate the expected src pad caps, from the current fixated
   * sink pad caps */
  if (!gst_video_info_from_caps (&vi, caps))
    return NULL;

  if (vpp->deinterlace_mode)
    GST_VIDEO_INFO_INTERLACE_MODE (&vi) = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  /* Update size from user-specified parameters */
  find_best_size (vpp, &vi, &width, &height);

  /* Update format from user-specified parameters */
  peer_caps = gst_pad_peer_query_caps (GST_BASE_TRANSFORM_SRC_PAD (trans),
      vpp->allowed_srcpad_caps);

  if (gst_caps_is_any (peer_caps) || gst_caps_is_empty (peer_caps))
    return peer_caps;
  if (!gst_caps_is_fixed (peer_caps))
    peer_caps = gst_caps_fixate (peer_caps);

  gst_video_info_from_caps (&peer_vi, peer_caps);
  out_format = GST_VIDEO_INFO_FORMAT (&peer_vi);

  /* Update width and height from the caps */
  if (GST_VIDEO_INFO_HEIGHT (&peer_vi) != 1 &&
      GST_VIDEO_INFO_WIDTH (&peer_vi) != 1)
    find_best_size(vpp, &peer_vi, &width, &height);

  if (vpp->format != DEFAULT_FORMAT)
    out_format = vpp->format;

  if (vpp->fps_n) {
    GST_VIDEO_INFO_FPS_N (&vi) = vpp->fps_n;
    GST_VIDEO_INFO_FPS_D (&vi) = vpp->fps_d;
    vpp->field_duration = gst_util_uint64_scale (GST_SECOND,
        vpp->fps_d, vpp->fps_n);
    if (DEFAULT_FRC_ALG == vpp->alg)
      vpp->alg = GST_MFX_FRC_PRESERVE_TIMESTAMP;
  }

  if (peer_caps)
    gst_caps_unref (peer_caps);

  feature =
      gst_mfx_find_preferred_caps_feature (GST_BASE_TRANSFORM_SRC_PAD (trans),
        &out_format);
  gst_video_info_change_format (&vi, out_format, width, height);

  out_caps = gst_video_info_to_caps (&vi);
  if (!out_caps)
    return NULL;


  if (feature) {
    feature_str = gst_mfx_caps_feature_to_string (feature);
    if (feature_str)
      gst_caps_set_features (out_caps, 0,
          gst_caps_features_new (feature_str, NULL));
  }

  if (vpp->format != out_format)
    vpp->format = out_format;

  return out_caps;
}
Example #19
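sbcenc intersects the allowed caps with a rate/channels filter, fixates channel-mode, bitpool, blocks, subbands, and allocation-method field by field, then configures the SBC encoder from the result.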
static gboolean
gst_sbc_enc_set_format (GstAudioEncoder * audio_enc, GstAudioInfo * info)
{
  const gchar *allocation_method, *channel_mode;
  GstSbcEnc *enc = GST_SBC_ENC (audio_enc);
  GstStructure *s;
  GstCaps *caps, *filter_caps;
  GstCaps *output_caps = NULL;
  guint sampleframes_per_frame;

  enc->rate = GST_AUDIO_INFO_RATE (info);
  enc->channels = GST_AUDIO_INFO_CHANNELS (info);

  /* negotiate output format based on downstream caps restrictions */
  caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (enc));
  /* check for NULL before is_empty: no peer means no allowed caps */
  if (caps == NULL)
    caps = gst_static_pad_template_get_caps (&sbc_enc_src_factory);
  else if (caps == GST_CAPS_NONE || gst_caps_is_empty (caps))
    goto failure;

  /* fixate output caps */
  filter_caps = gst_caps_new_simple ("audio/x-sbc", "rate", G_TYPE_INT,
      enc->rate, "channels", G_TYPE_INT, enc->channels, NULL);
  output_caps = gst_caps_intersect (caps, filter_caps);
  gst_caps_unref (filter_caps);

  if (output_caps == NULL || gst_caps_is_empty (output_caps)) {
    GST_WARNING_OBJECT (enc, "Couldn't negotiate output caps with input rate "
        "%d and input channels %d and allowed output caps %" GST_PTR_FORMAT,
        enc->rate, enc->channels, caps);
    goto failure;
  }

  gst_caps_unref (caps);
  caps = NULL;

  GST_DEBUG_OBJECT (enc, "fixating caps %" GST_PTR_FORMAT, output_caps);
  output_caps = gst_caps_truncate (output_caps);
  s = gst_caps_get_structure (output_caps, 0);
  if (enc->channels == 1)
    gst_structure_fixate_field_string (s, "channel-mode", "mono");
  else
    gst_structure_fixate_field_string (s, "channel-mode", "joint");

  gst_structure_fixate_field_nearest_int (s, "bitpool", 64);
  gst_structure_fixate_field_nearest_int (s, "blocks", 16);
  gst_structure_fixate_field_nearest_int (s, "subbands", 8);
  gst_structure_fixate_field_string (s, "allocation-method", "loudness");
  s = NULL;

  /* in case there's anything else left to fixate */
  output_caps = gst_caps_fixate (output_caps);
  gst_caps_set_simple (output_caps, "parsed", G_TYPE_BOOLEAN, TRUE, NULL);

  GST_INFO_OBJECT (enc, "output caps %" GST_PTR_FORMAT, output_caps);

  /* let's see what we fixated to */
  s = gst_caps_get_structure (output_caps, 0);
  gst_structure_get_int (s, "blocks", &enc->blocks);
  gst_structure_get_int (s, "subbands", &enc->subbands);
  gst_structure_get_int (s, "bitpool", &enc->bitpool);
  allocation_method = gst_structure_get_string (s, "allocation-method");
  channel_mode = gst_structure_get_string (s, "channel-mode");

  /* We want channel-mode and channels coherent */
  if (enc->channels == 1) {
    if (g_strcmp0 (channel_mode, "mono") != 0) {
      GST_ERROR_OBJECT (enc, "Can't have channel-mode '%s' for 1 channel",
          channel_mode);
      goto failure;
    }
  } else {
    if (g_strcmp0 (channel_mode, "joint") != 0 &&
        g_strcmp0 (channel_mode, "stereo") != 0 &&
        g_strcmp0 (channel_mode, "dual") != 0) {
      GST_ERROR_OBJECT (enc, "Can't have channel-mode '%s' for 2 channels",
          channel_mode);
      goto failure;
    }
  }

  /* we want to be handed all available samples in handle_frame, but always
   * enough to encode a frame */
  sampleframes_per_frame = enc->blocks * enc->subbands;
  gst_audio_encoder_set_frame_samples_min (audio_enc, sampleframes_per_frame);
  gst_audio_encoder_set_frame_samples_max (audio_enc, sampleframes_per_frame);
  gst_audio_encoder_set_frame_max (audio_enc, 0);

  /* FIXME: what to do with left-over samples at the end? can we encode them? */
  gst_audio_encoder_set_hard_min (audio_enc, TRUE);

  /* and configure encoder based on the output caps we negotiated */
  if (enc->rate == 16000)
    enc->sbc.frequency = SBC_FREQ_16000;
  else if (enc->rate == 32000)
    enc->sbc.frequency = SBC_FREQ_32000;
  else if (enc->rate == 44100)
    enc->sbc.frequency = SBC_FREQ_44100;
  else if (enc->rate == 48000)
    enc->sbc.frequency = SBC_FREQ_48000;
  else
    goto failure;

  if (enc->blocks == 4)
    enc->sbc.blocks = SBC_BLK_4;
  else if (enc->blocks == 8)
    enc->sbc.blocks = SBC_BLK_8;
  else if (enc->blocks == 12)
    enc->sbc.blocks = SBC_BLK_12;
  else if (enc->blocks == 16)
    enc->sbc.blocks = SBC_BLK_16;
  else
    goto failure;

  enc->sbc.subbands = (enc->subbands == 4) ? SBC_SB_4 : SBC_SB_8;
  enc->sbc.bitpool = enc->bitpool;

  if (channel_mode == NULL || allocation_method == NULL)
    goto failure;

  if (strcmp (channel_mode, "joint") == 0)
    enc->sbc.mode = SBC_MODE_JOINT_STEREO;
  else if (strcmp (channel_mode, "stereo") == 0)
    enc->sbc.mode = SBC_MODE_STEREO;
  else if (strcmp (channel_mode, "dual") == 0)
    enc->sbc.mode = SBC_MODE_DUAL_CHANNEL;
  else if (strcmp (channel_mode, "mono") == 0)
    enc->sbc.mode = SBC_MODE_MONO;
  else if (strcmp (channel_mode, "auto") == 0)
    enc->sbc.mode = SBC_MODE_JOINT_STEREO;
  else
    goto failure;

  if (strcmp (allocation_method, "loudness") == 0)
    enc->sbc.allocation = SBC_AM_LOUDNESS;
  else if (strcmp (allocation_method, "snr") == 0)
    enc->sbc.allocation = SBC_AM_SNR;
  else
    goto failure;

  if (!gst_audio_encoder_set_output_format (audio_enc, output_caps))
    goto failure;

  return gst_audio_encoder_negotiate (audio_enc);

failure:
  if (output_caps)
    gst_caps_unref (output_caps);
  if (caps)
    gst_caps_unref (caps);
  return FALSE;
}
Example #20
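The i.MX VPU H.264 encoder fixates the downstream-allowed caps to read the "alignment" field and decide whether to enable access-unit delimiters.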
static gboolean gst_imx_vpu_h264_enc_set_open_params(GstImxVpuBaseEnc *vpu_base_enc, VpuEncOpenParam *open_param)
{
	GstCaps *template_caps, *allowed_caps;
	GstImxVpuH264Enc *enc = GST_IMX_VPU_H264_ENC(vpu_base_enc);

	open_param->eFormat = VPU_V_AVC;
	open_param->eColorFormat = VPU_COLOR_420;

	/* These are default settings from VPU_EncOpenSimp */
	open_param->VpuEncStdParam.avcParam.avc_constrainedIntraPredFlag = 0;
	open_param->VpuEncStdParam.avcParam.avc_disableDeblk = 0;
	open_param->VpuEncStdParam.avcParam.avc_deblkFilterOffsetAlpha = 6;
	open_param->VpuEncStdParam.avcParam.avc_deblkFilterOffsetBeta = 0;
	open_param->VpuEncStdParam.avcParam.avc_chromaQpOffset = 10;
	open_param->VpuEncStdParam.avcParam.avc_audEnable = 0;
	open_param->VpuEncStdParam.avcParam.avc_fmoEnable = 0;
	open_param->VpuEncStdParam.avcParam.avc_fmoType = 0;
	open_param->VpuEncStdParam.avcParam.avc_fmoSliceNum = 1;
	open_param->VpuEncStdParam.avcParam.avc_fmoSliceSaveBufSize = 32;

	/* Since this call is part of set_format, it is a suitable place for looking up whether or not
	 * downstream requires access units. The src caps are retrieved and examined for this purpose. */

	template_caps = gst_static_pad_template_get_caps(&static_src_template);
	allowed_caps = gst_pad_get_allowed_caps(GST_VIDEO_ENCODER_SRC_PAD(GST_VIDEO_ENCODER(vpu_base_enc)));

	if (allowed_caps == template_caps)
	{
		enc->produce_access_units = TRUE;
	}
	else if (allowed_caps != NULL)
	{
		GstStructure *s;
		gchar const *alignment_str;

		if (gst_caps_is_empty(allowed_caps))
		{
			GST_ERROR_OBJECT(enc, "src caps are empty");
			gst_caps_unref(allowed_caps);
			return FALSE;
		}

		allowed_caps = gst_caps_make_writable(allowed_caps);
		allowed_caps = gst_caps_fixate(allowed_caps);
		s = gst_caps_get_structure(allowed_caps, 0);

		alignment_str = gst_structure_get_string(s, "alignment");
		enc->produce_access_units = !g_strcmp0(alignment_str, "au");

		gst_caps_unref(allowed_caps);
	}

	if (enc->produce_access_units)
		open_param->VpuEncStdParam.avcParam.avc_audEnable = 1;

	GST_INFO_OBJECT(vpu_base_enc, "produce h.264 access units: %s", enc->produce_access_units ? "yes" : "no");

	gst_caps_unref(template_caps);

	return TRUE;
}
Example #21
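glstereosplit intersects each output pad's downstream caps with the transformed sink caps, fixates the left and right results separately, and sets them on the respective pads.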
static gboolean
stereosplit_set_output_caps (GstGLStereoSplit * split, GstCaps * sinkcaps)
{
  GstCaps *left = NULL, *right = NULL, *tridcaps = NULL;
  GstCaps *tmp, *combined;
  gboolean res = FALSE;

  /* Choose some preferred output caps.
   * Keep input width/height and PAR, preserve preferred output
   * multiview flags for flipping/flopping if any, and set the
   * left and right pads to left/mono and right/mono respectively,
   * as they prefer.
   */

  /* Calculate what downstream can collectively support */
  left =
      stereosplit_get_src_caps (split, split->left_pad,
      GST_VIDEO_MULTIVIEW_MODE_LEFT);
  if (left == NULL)
    goto fail;
  right =
      stereosplit_get_src_caps (split, split->right_pad,
      GST_VIDEO_MULTIVIEW_MODE_RIGHT);
  if (right == NULL)
    goto fail;

  tridcaps = stereosplit_transform_caps (split, GST_PAD_SINK, sinkcaps, NULL);

  if (!tridcaps || gst_caps_is_empty (tridcaps)) {
    GST_ERROR_OBJECT (split,
        "Failed to transform input caps %" GST_PTR_FORMAT, sinkcaps);
    goto fail;
  }

  /* Preserve downstream preferred flipping/flopping */
  tmp =
      strip_mview_fields (gst_caps_ref (left),
      GST_VIDEO_MULTIVIEW_FLAGS_LEFT_FLIPPED |
      GST_VIDEO_MULTIVIEW_FLAGS_LEFT_FLOPPED);
  combined = gst_caps_intersect (tridcaps, tmp);
  gst_caps_unref (tridcaps);
  gst_caps_unref (tmp);
  tridcaps = combined;

  tmp =
      strip_mview_fields (gst_caps_ref (right),
      GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_FLIPPED |
      GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_FLOPPED);
  combined = gst_caps_intersect (tridcaps, tmp);
  gst_caps_unref (tridcaps);
  gst_caps_unref (tmp);
  tridcaps = combined;

  if (G_UNLIKELY (gst_caps_is_empty (tridcaps))) {
    gst_caps_unref (tridcaps);
    goto fail;
  }

  /* Now generate the version for each output pad */
  GST_DEBUG_OBJECT (split, "Attempting to set output caps %" GST_PTR_FORMAT,
      tridcaps);
  tmp = gst_caps_intersect (tridcaps, left);
  gst_caps_unref (left);
  left = tmp;
  left = gst_caps_fixate (left);
  if (!gst_pad_set_caps (split->left_pad, left)) {
    GST_ERROR_OBJECT (split,
        "Failed to set left output caps %" GST_PTR_FORMAT, left);
    goto fail;
  }

  tmp = gst_caps_intersect (tridcaps, right);
  gst_caps_unref (right);
  right = tmp;
  right = gst_caps_fixate (right);
  if (!gst_pad_set_caps (split->right_pad, right)) {
    GST_ERROR_OBJECT (split,
        "Failed to set right output caps %" GST_PTR_FORMAT, right);
    goto fail;
  }

  /* FIXME: Provide left and right caps to do_bufferpool */
  stereosplit_do_bufferpool (split, left);

  res = TRUE;

fail:
  if (left)
    gst_caps_unref (left);
  if (right)
    gst_caps_unref (right);
  if (tridcaps)
    gst_caps_unref (tridcaps);
  return res;
}
Example #22
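The video twin of example #16: intersect the downstream-allowed caps with the codec's caps, fixate, and use the result as the encoder's output state.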
static gboolean
gst_ffmpegvidenc_set_format (GstVideoEncoder * encoder,
                             GstVideoCodecState * state)
{
    GstCaps *other_caps;
    GstCaps *allowed_caps;
    GstCaps *icaps;
    GstVideoCodecState *output_format;
    enum AVPixelFormat pix_fmt;
    GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
    GstFFMpegVidEncClass *oclass =
        (GstFFMpegVidEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);

    /* close old session */
    if (ffmpegenc->opened) {
        gst_ffmpeg_avcodec_close (ffmpegenc->context);
        ffmpegenc->opened = FALSE;
        if (avcodec_get_context_defaults3 (ffmpegenc->context,
                                           oclass->in_plugin) < 0) {
            GST_DEBUG_OBJECT (ffmpegenc, "Failed to set context defaults");
            return FALSE;
        }
    }

    /* if we set it in _getcaps we should set it also in _link */
    ffmpegenc->context->strict_std_compliance = ffmpegenc->compliance;

    /* user defined properties */
    ffmpegenc->context->bit_rate = ffmpegenc->bitrate;
    ffmpegenc->context->bit_rate_tolerance = ffmpegenc->bitrate;
    ffmpegenc->context->gop_size = ffmpegenc->gop_size;
    ffmpegenc->context->me_method = ffmpegenc->me_method;
    GST_DEBUG_OBJECT (ffmpegenc, "Setting avcontext to bitrate %d, gop_size %d",
                      ffmpegenc->bitrate, ffmpegenc->gop_size);

    if (ffmpegenc->max_threads == 0) {
        if (!(oclass->in_plugin->capabilities & CODEC_CAP_AUTO_THREADS))
            ffmpegenc->context->thread_count = gst_ffmpeg_auto_max_threads ();
        else
            ffmpegenc->context->thread_count = 0;
    } else
        ffmpegenc->context->thread_count = ffmpegenc->max_threads;

    /* RTP payload used for GOB production (for Asterisk) */
    if (ffmpegenc->rtp_payload_size) {
        ffmpegenc->context->rtp_payload_size = ffmpegenc->rtp_payload_size;
    }

    /* additional avcodec settings */
    /* first fill in the majority by copying over */
    gst_ffmpeg_cfg_fill_context (ffmpegenc, ffmpegenc->context);

    /* then handle some special cases */
    ffmpegenc->context->lmin = (ffmpegenc->lmin * FF_QP2LAMBDA + 0.5);
    ffmpegenc->context->lmax = (ffmpegenc->lmax * FF_QP2LAMBDA + 0.5);

    if (ffmpegenc->interlaced) {
        ffmpegenc->context->flags |=
            CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME;
    }

    /* some other defaults */
    ffmpegenc->context->rc_strategy = 2;
    ffmpegenc->context->b_frame_strategy = 0;
    ffmpegenc->context->coder_type = 0;
    ffmpegenc->context->context_model = 0;
    ffmpegenc->context->scenechange_threshold = 0;

    /* and last but not least the pass; CBR, 2-pass, etc */
    ffmpegenc->context->flags |= ffmpegenc->pass;
    switch (ffmpegenc->pass) {
    /* some additional action depends on type of pass */
    case CODEC_FLAG_QSCALE:
        ffmpegenc->context->global_quality
            = ffmpegenc->picture->quality = FF_QP2LAMBDA * ffmpegenc->quantizer;
        break;
    case CODEC_FLAG_PASS1:     /* need to prepare a stats file */
        /* we don't close when changing caps, fingers crossed */
        if (!ffmpegenc->file)
            ffmpegenc->file = g_fopen (ffmpegenc->filename, "w");
        if (!ffmpegenc->file)
            goto open_file_err;
        break;
    case CODEC_FLAG_PASS2:
    {   /* need to read the whole stats file ! */
        gsize size;

        if (!g_file_get_contents (ffmpegenc->filename,
                                  &ffmpegenc->context->stats_in, &size, NULL))
            goto file_read_err;

        break;
    }
    default:
        break;
    }

    GST_DEBUG_OBJECT (ffmpegenc, "Extracting common video information");
    /* fetch pix_fmt, fps, par, width, height... */
    gst_ffmpeg_videoinfo_to_context (&state->info, ffmpegenc->context);

    /* sanitize time base */
    if (ffmpegenc->context->time_base.num <= 0
            || ffmpegenc->context->time_base.den <= 0)
        goto insane_timebase;

    if ((oclass->in_plugin->id == AV_CODEC_ID_MPEG4)
            && (ffmpegenc->context->time_base.den > 65535)) {
        /* MPEG4 Standards do not support time_base denominator greater than
         * (1<<16) - 1 . We therefore scale them down.
         * Agreed, it will not be the exact framerate... but the difference
         * shouldn't be that noticeable */
        ffmpegenc->context->time_base.num =
            (gint) gst_util_uint64_scale_int (ffmpegenc->context->time_base.num,
                                              65535, ffmpegenc->context->time_base.den);
        ffmpegenc->context->time_base.den = 65535;
        GST_LOG_OBJECT (ffmpegenc, "MPEG4 : scaled down framerate to %d / %d",
                        ffmpegenc->context->time_base.den, ffmpegenc->context->time_base.num);
    }

    pix_fmt = ffmpegenc->context->pix_fmt;

    /* max-key-interval may need the framerate set above */
    if (ffmpegenc->max_key_interval) {
        AVCodecContext *ctx;

        /* override gop-size */
        ctx = ffmpegenc->context;
        ctx->gop_size = (ffmpegenc->max_key_interval < 0) ?
                        (-ffmpegenc->max_key_interval
                         * (ctx->time_base.den * ctx->ticks_per_frame / ctx->time_base.num))
                        : ffmpegenc->max_key_interval;
    }

    /* some codecs support more than one format, first auto-choose one */
    GST_DEBUG_OBJECT (ffmpegenc, "picking an output format ...");
    allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
    if (!allowed_caps) {
        GST_DEBUG_OBJECT (ffmpegenc, "... but no peer, using template caps");
        /* we need to copy because get_allowed_caps returns a ref, and
         * get_pad_template_caps doesn't */
        allowed_caps =
            gst_pad_get_pad_template_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
    }
    GST_DEBUG_OBJECT (ffmpegenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
    gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
                                  oclass->in_plugin->type, allowed_caps, ffmpegenc->context);

    /* open codec */
    if (gst_ffmpeg_avcodec_open (ffmpegenc->context, oclass->in_plugin) < 0) {
        gst_caps_unref (allowed_caps);
        goto open_codec_fail;
    }

    /* is the colourspace correct? */
    if (pix_fmt != ffmpegenc->context->pix_fmt) {
        gst_caps_unref (allowed_caps);
        goto pix_fmt_err;
    }

    /* we may have failed mapping caps to a pixfmt,
     * and quite some codecs do not make up their own mind about that
     * in any case, _NONE can never work out later on */
    if (pix_fmt == AV_PIX_FMT_NONE) {
        gst_caps_unref (allowed_caps);
        goto bad_input_fmt;
    }

    /* second pass stats buffer no longer needed */
    g_free (ffmpegenc->context->stats_in);

    /* try to set this caps on the other side */
    other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
                 ffmpegenc->context, TRUE);

    if (!other_caps) {
        gst_caps_unref (allowed_caps);
        goto unsupported_codec;
    }

    icaps = gst_caps_intersect (allowed_caps, other_caps);
    gst_caps_unref (allowed_caps);
    gst_caps_unref (other_caps);
    if (gst_caps_is_empty (icaps)) {
        gst_caps_unref (icaps);
        goto unsupported_codec;
    }
    icaps = gst_caps_fixate (icaps);

    GST_DEBUG_OBJECT (ffmpegenc, "codec flags 0x%08x", ffmpegenc->context->flags);

    /* Store input state and set output state */
    if (ffmpegenc->input_state)
        gst_video_codec_state_unref (ffmpegenc->input_state);
    ffmpegenc->input_state = gst_video_codec_state_ref (state);

    output_format = gst_video_encoder_set_output_state (encoder, icaps, state);
    gst_video_codec_state_unref (output_format);

    /* Store some tags */
    {
        GstTagList *tags = gst_tag_list_new_empty ();
        const gchar *codec;

        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_NOMINAL_BITRATE,
                          (guint) ffmpegenc->context->bit_rate, NULL);

        if ((codec =
                    gst_ffmpeg_get_codecid_longname (ffmpegenc->context->codec_id)))
            gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_VIDEO_CODEC, codec,
                              NULL);

        gst_video_encoder_merge_tags (encoder, tags, GST_TAG_MERGE_REPLACE);
        gst_tag_list_unref (tags);
    }

    /* success! */
    ffmpegenc->opened = TRUE;

    return TRUE;

    /* ERRORS */
open_file_err:
    {
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, OPEN_WRITE,
                           (("Could not open file \"%s\" for writing."), ffmpegenc->filename),
                           GST_ERROR_SYSTEM);
        return FALSE;
    }
file_read_err:
    {
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, READ,
                           (("Could not get contents of file \"%s\"."), ffmpegenc->filename),
                           GST_ERROR_SYSTEM);
        return FALSE;
    }

insane_timebase:
    {
        GST_ERROR_OBJECT (ffmpegenc, "Rejecting time base %d/%d",
                          ffmpegenc->context->time_base.den, ffmpegenc->context->time_base.num);
        goto cleanup_stats_in;
    }
unsupported_codec:
    {
        GST_DEBUG ("Unsupported codec - no caps found");
        goto cleanup_stats_in;
    }
open_codec_fail:
    {
        GST_DEBUG_OBJECT (ffmpegenc, "avenc_%s: Failed to open libav codec",
                          oclass->in_plugin->name);
        goto close_codec;
    }

pix_fmt_err:
    {
        GST_DEBUG_OBJECT (ffmpegenc,
                          "avenc_%s: AV wants different colourspace (%d given, %d wanted)",
                          oclass->in_plugin->name, pix_fmt, ffmpegenc->context->pix_fmt);
        goto close_codec;
    }

bad_input_fmt:
    {
        GST_DEBUG_OBJECT (ffmpegenc, "avenc_%s: Failed to determine input format",
                          oclass->in_plugin->name);
        goto close_codec;
    }
close_codec:
    {
        gst_ffmpeg_avcodec_close (ffmpegenc->context);
        if (avcodec_get_context_defaults3 (ffmpegenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegenc, "Failed to set context defaults");
        goto cleanup_stats_in;
    }
cleanup_stats_in:
    {
        g_free (ffmpegenc->context->stats_in);
        return FALSE;
    }
}
Example #23
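OpenCV's video writer either wraps a user-supplied pipeline (locating its appsrc) or deduces container and encoder from filename and fourcc; the listing is cut off mid-function.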
/*!
 * \brief CvVideoWriter_GStreamer::open
 * \param filename filename to output to
 * \param fourcc desired codec fourcc
 * \param fps desired framerate
 * \param frameSize the size of the expected frames
 * \param is_color color or grayscale
 * \return success
 *
 * We support two modes of operation: either the user passes a filename and a
 * fourcc code, or a manual pipeline description like in CvVideoCapture_Gstreamer.
 * In the latter case, we just push frames into the appsrc with appropriate caps.
 * In the former case, we try to deduce the correct container from the filename
 * and the correct encoder from the fourcc code. A usage sketch of both modes
 * follows this function.
 *
 * If the file extension is not recognized, an AVI container is used.
 *
 */
bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
                                    double fps, CvSize frameSize, bool is_color )
{
    CV_FUNCNAME("CvVideoWriter_GStreamer::open");

    // check arguments
    assert (filename);
    assert (fps > 0);
    assert (frameSize.width > 0  &&  frameSize.height > 0);

    // init gstreamer
    gst_initializer::init();

    // init vars
    bool manualpipeline = true;
    int  bufsize = 0;
    GError *err = NULL;
    const char* mime = NULL;
    GstStateChangeReturn stateret;

    GstCaps* caps = NULL;
    GstCaps* videocaps = NULL;

#if FULL_GST_VERSION >= VERSION_NUM(0,10,32)
    GstCaps* containercaps = NULL;
    GstEncodingContainerProfile* containerprofile = NULL;
    GstEncodingVideoProfile* videoprofile = NULL;
#endif

    GstIterator* it = NULL;
    gboolean done = FALSE;
    GstElement *element = NULL;
    gchar* name = NULL;
    GstElement* splitter = NULL;
    GstElement* combiner = NULL;

    // we first try to construct a pipeline from the given string.
    // if that fails, we assume it is an ordinary filename

    __BEGIN__;

    encodebin = gst_parse_launch(filename, &err);
    manualpipeline = (encodebin != NULL);

    if(manualpipeline)
    {
#if GST_VERSION_MAJOR == 0
        it = gst_bin_iterate_sources(GST_BIN(encodebin));
        if(gst_iterator_next(it, (gpointer *)&source) != GST_ITERATOR_OK) {
            CV_ERROR(CV_StsError, "GStreamer: cannot find appsink in manual pipeline\n");
            return false;
        }
#else
        it = gst_bin_iterate_sources (GST_BIN(encodebin));
        GValue value = G_VALUE_INIT;

        while (!done) {
          switch (gst_iterator_next (it, &value)) {
            case GST_ITERATOR_OK:
              element = GST_ELEMENT (g_value_get_object (&value));
              name = gst_element_get_name(element);
              if (name){
                if(strstr(name, "opencvsrc") != NULL || strstr(name, "appsrc") != NULL) {
                  source = GST_ELEMENT ( gst_object_ref (element) );
                  done = TRUE;
                }
                g_free(name);
              }
              g_value_unset (&value);

              break;
            case GST_ITERATOR_RESYNC:
              gst_iterator_resync (it);
              break;
            case GST_ITERATOR_ERROR:
            case GST_ITERATOR_DONE:
              done = TRUE;
              break;
          }
        }
        gst_iterator_free (it);

        if (!source){
            CV_ERROR(CV_StsError, "GStreamer: cannot find appsrc in manual pipeline\n");
            return false;
        }
#endif
        pipeline = encodebin;
    }
    else
    {
        pipeline = gst_pipeline_new (NULL);

        // we just got a filename and a fourcc code.
        // first, try to guess the container from the filename
        //encodebin = gst_element_factory_make("encodebin", NULL);

        //map old, no-longer-existing fourcc ids: these were used in previous
        //OpenCV versions but do not exist in GStreamer any more
        if (fourcc == CV_FOURCC('M','P','1','V')) fourcc = CV_FOURCC('M', 'P', 'G' ,'1');
        if (fourcc == CV_FOURCC('M','P','2','V')) fourcc = CV_FOURCC('M', 'P', 'G' ,'2');
        if (fourcc == CV_FOURCC('D','R','A','C')) fourcc = CV_FOURCC('d', 'r', 'a' ,'c');


        //create encoder caps from fourcc

        videocaps = gst_riff_create_video_caps(fourcc, NULL, NULL, NULL, NULL, NULL);
        if (!videocaps){
            CV_ERROR( CV_StsUnsupportedFormat, "Gstreamer Opencv backend does not support this codec.");
        }

        //create container caps from file extension
        mime = filenameToMimetype(filename);
        if (!mime) {
            CV_ERROR( CV_StsUnsupportedFormat, "Gstreamer Opencv backend does not support this file type.");
        }

#if FULL_GST_VERSION >= VERSION_NUM(0,10,32)
        containercaps = gst_caps_from_string(mime);

        //create encodebin profile
        containerprofile = gst_encoding_container_profile_new("container", "container", containercaps, NULL);
        videoprofile = gst_encoding_video_profile_new(videocaps, NULL, NULL, 1);
        gst_encoding_container_profile_add_profile(containerprofile, (GstEncodingProfile *) videoprofile);
#endif

        //create pipeline elements
        encodebin = gst_element_factory_make("encodebin", NULL);

#if FULL_GST_VERSION >= VERSION_NUM(0,10,32)
        g_object_set(G_OBJECT(encodebin), "profile", containerprofile, NULL);
#endif
        source = gst_element_factory_make("appsrc", NULL);
        file = gst_element_factory_make("filesink", NULL);
        g_object_set(G_OBJECT(file), "location", filename, NULL);
    }

    if (is_color)
    {
        input_pix_fmt = GST_VIDEO_FORMAT_BGR;
        bufsize = frameSize.width * frameSize.height * 3;

#if GST_VERSION_MAJOR == 0
        caps = gst_video_format_new_caps(GST_VIDEO_FORMAT_BGR,
                                         frameSize.width,
                                         frameSize.height,
                                         int(fps), 1,
                                         1, 1);
#else
        caps = gst_caps_new_simple("video/x-raw",
                                   "format", G_TYPE_STRING, "BGR",
                                   "width", G_TYPE_INT, frameSize.width,
                                   "height", G_TYPE_INT, frameSize.height,
                                   "framerate", GST_TYPE_FRACTION, int(fps), 1,
                                   NULL);
        caps = gst_caps_fixate(caps);

#endif

    }
    else
    {
#if FULL_GST_VERSION >= VERSION_NUM(0,10,29)
        input_pix_fmt = GST_VIDEO_FORMAT_GRAY8;
        bufsize = frameSize.width * frameSize.height;

#if GST_VERSION_MAJOR == 0
        caps = gst_video_format_new_caps(GST_VIDEO_FORMAT_GRAY8,
                                         frameSize.width,
                                         frameSize.height,
                                         int(fps), 1,
                                         1, 1);
#else
        caps = gst_caps_new_simple("video/x-raw",
                                   "format", G_TYPE_STRING, "GRAY8",
                                   "width", G_TYPE_INT, frameSize.width,
                                   "height", G_TYPE_INT, frameSize.height,
                                   "framerate", GST_TYPE_FRACTION, int(fps), 1,
                                   NULL);
        caps = gst_caps_fixate(caps);
#endif
#else
        CV_Assert(!"Gstreamer 0.10.29 or newer is required for grayscale input");
#endif
    }

    gst_app_src_set_caps(GST_APP_SRC(source), caps);
    gst_app_src_set_stream_type(GST_APP_SRC(source), GST_APP_STREAM_TYPE_STREAM);
    gst_app_src_set_size (GST_APP_SRC(source), -1);

    g_object_set(G_OBJECT(source), "format", GST_FORMAT_TIME, NULL);
    g_object_set(G_OBJECT(source), "block", 1, NULL);
    g_object_set(G_OBJECT(source), "is-live", 0, NULL);


    if(!manualpipeline)
    {
        g_object_set(G_OBJECT(file), "buffer-size", bufsize, NULL);
        gst_bin_add_many(GST_BIN(pipeline), source, encodebin, file, NULL);
        if(!gst_element_link_many(source, encodebin, file, NULL)) {
            CV_ERROR(CV_StsError, "GStreamer: cannot link elements\n");
        }
    }

#if GST_VERSION_MAJOR == 0
    // HACK: remove streamsplitter and streamcombiner from the
    // encodebin pipeline to prevent early EOS event handling.
    // We always fetch BGR or grayscale frames, so the
    // combiner->splitter edge in the graph is useless.
    it = gst_bin_iterate_recurse (GST_BIN(encodebin));
    while (!done) {
      switch (gst_iterator_next (it, (void**)&element)) {
        case GST_ITERATOR_OK:
          name = gst_element_get_name(element);
          if (strstr(name, "streamsplitter"))
            splitter = element;
          else if (strstr(name, "streamcombiner"))
            combiner = element;
          break;
        case GST_ITERATOR_RESYNC:
          gst_iterator_resync (it);
          break;
        case GST_ITERATOR_ERROR:
          done = true;
          break;
        case GST_ITERATOR_DONE:
          done = true;
          break;
      }
    }

    gst_iterator_free (it);

    if (splitter && combiner)
    {
        gst_element_unlink(splitter, combiner);

        GstPad* src  = gst_element_get_pad(combiner, "src");
        GstPad* sink = gst_element_get_pad(combiner, "encodingsink");

        GstPad* srcPeer = gst_pad_get_peer(src);
        GstPad* sinkPeer = gst_pad_get_peer(sink);

        gst_pad_unlink(sinkPeer, sink);
        gst_pad_unlink(src, srcPeer);

        gst_pad_link(sinkPeer, srcPeer);

        src = gst_element_get_pad(splitter, "encodingsrc");
        sink = gst_element_get_pad(splitter, "sink");

        srcPeer = gst_pad_get_peer(src);
        sinkPeer = gst_pad_get_peer(sink);

        gst_pad_unlink(sinkPeer, sink);
        gst_pad_unlink(src, srcPeer);

        gst_pad_link(sinkPeer, srcPeer);
    }
#endif

    stateret = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
    if(stateret  == GST_STATE_CHANGE_FAILURE) {
        handleMessage(pipeline);
        CV_ERROR(CV_StsError, "GStreamer: cannot put pipeline to play\n");
    }

    framerate = fps;
    num_frames = 0;

    handleMessage(pipeline);

    __END__;

    return true;
}
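
A hedged usage sketch of the two modes documented above, written against the legacy OpenCV C API that this writer implements (the include path, pipeline string, and file names are illustrative assumptions, not part of the original example):

/* Usage sketch for the two open() modes (legacy OpenCV C API). */
#include <opencv2/highgui/highgui_c.h>  /* adjust for your OpenCV version */

int
main (void)
{
  /* Mode 1: filename + fourcc; the container is deduced from the
   * extension, the encoder from the fourcc (AVI if unrecognized). */
  CvVideoWriter *w1 = cvCreateVideoWriter ("out.avi",
      CV_FOURCC ('M', 'J', 'P', 'G'), 30.0, cvSize (640, 480), 1);

  /* Mode 2: manual pipeline description; frames are pushed into the
   * appsrc. The pipeline string here is illustrative only. */
  CvVideoWriter *w2 = cvCreateVideoWriter (
      "appsrc ! videoconvert ! x264enc ! matroskamux ! "
      "filesink location=out.mkv", 0, 30.0, cvSize (640, 480), 1);

  IplImage *frame = cvCreateImage (cvSize (640, 480), IPL_DEPTH_8U, 3);
  if (w1) cvWriteFrame (w1, frame);
  if (w2) cvWriteFrame (w2, frame);

  cvReleaseImage (&frame);
  if (w1) cvReleaseVideoWriter (&w1);
  if (w2) cvReleaseVideoWriter (&w2);
  return 0;
}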
Example #24
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean processed = FALSE;
  GstBuffer *tmp;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps,
          &error))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;
    GstCaps *acquired_caps, *available_caps, *caps, *filter;
    GstStructure *st;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize.
     */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = gst_buffer_ref (frame->input_buffer);
      processed = TRUE;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    /* For decoders G_FMT returns the coded size, G_SELECTION returns the
     * visible size in the compose rectangle. gst_v4l2_object_acquire_format()
     * checks both and returns the visible size as width/height and the coded
     * size as padding. */
    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    /* Create caps from the acquired format, remove the format field */
    acquired_caps = gst_video_info_to_caps (&info);
    st = gst_caps_get_structure (acquired_caps, 0);
    gst_structure_remove_field (st, "format");

    /* Probe currently available pixel formats */
    available_caps = gst_v4l2_object_probe_caps (self->v4l2capture, NULL);
    available_caps = gst_caps_make_writable (available_caps);

    /* Replace the coded size with the visible size; we want to negotiate the
     * visible size with downstream, not the coded size. */
    gst_caps_map_in_place (available_caps, gst_v4l2_video_remove_padding, self);

    filter = gst_caps_intersect_full (available_caps, acquired_caps,
        GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (acquired_caps);
    gst_caps_unref (available_caps);
    caps = gst_pad_peer_query_caps (decoder->srcpad, filter);
    gst_caps_unref (filter);

    GST_DEBUG_OBJECT (self, "Possible decoded caps: %" GST_PTR_FORMAT, caps);
    if (gst_caps_is_empty (caps)) {
      gst_caps_unref (caps);
      goto not_negotiated;
    }

    /* Fixate pixel format */
    caps = gst_caps_fixate (caps);

    GST_DEBUG_OBJECT (self, "Chosen decoded caps: %" GST_PTR_FORMAT, caps);

    /* Try to set negotiated format, on success replace acquired format */
    if (gst_v4l2_object_set_format (self->v4l2capture, caps, &error))
      gst_video_info_from_caps (&info, caps);
    else
      gst_v4l2_clear_error (&error);
    gst_caps_unref (caps);

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information; there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task; when it quits, it will disable input
     * processing to unlock the input if draining, or to prevent potential
     * blocking */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (!processed) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
            pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }
  }

  /* No need to keep the input around */
  tmp = frame->input_buffer;
  frame->input_buffer = gst_buffer_new ();
  gst_buffer_copy_into (frame->input_buffer, tmp,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
      GST_BUFFER_COPY_META, 0, 0);
  gst_buffer_unref (tmp);

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    gst_v4l2_error (self, &error);
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }

start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("Maybe be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
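
The capture-side negotiation above reduces to a reusable core: probe what is available, intersect it with the acquired format (keeping the device's preferred order), query the peer, reject empty caps, then fixate. A minimal standalone sketch of that core (no real pads or V4L2 device involved; both caps strings are illustrative):

/* Sketch of the intersect -> empty-check -> fixate core used above.
 * GST_CAPS_INTERSECT_FIRST keeps the structure order of the first caps,
 * so formats the device prefers survive into fixation. */
#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstCaps *device_caps, *peer_caps, *result;
  gchar *str;

  gst_init (&argc, &argv);

  device_caps = gst_caps_from_string ("video/x-raw, "
      "format=(string){ NV12, I420 }, "
      "width=(int)[ 16, 1920 ], height=(int)[ 16, 1080 ]");
  peer_caps = gst_caps_from_string ("video/x-raw, "
      "format=(string)I420, width=(int)1280, height=(int)720");

  result = gst_caps_intersect_full (device_caps, peer_caps,
      GST_CAPS_INTERSECT_FIRST);
  gst_caps_unref (device_caps);
  gst_caps_unref (peer_caps);

  if (gst_caps_is_empty (result)) {
    gst_caps_unref (result);
    g_printerr ("negotiation failed: empty intersection\n");
    return 1;
  }

  result = gst_caps_fixate (result);
  str = gst_caps_to_string (result);
  g_print ("negotiated: %s\n", str);

  g_free (str);
  gst_caps_unref (result);
  return 0;
}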
Example #25
static GstCaps *
gst_gl_filter_fixate_caps (GstBaseTransform * bt,
    GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
  GstStructure *ins, *outs;
  const GValue *from_par, *to_par;
  GValue fpar = { 0, }, tpar = { 0, };

  othercaps = gst_caps_truncate (othercaps);
  othercaps = gst_caps_make_writable (othercaps);

  GST_DEBUG_OBJECT (bt, "trying to fixate othercaps %" GST_PTR_FORMAT
      " based on caps %" GST_PTR_FORMAT, othercaps, caps);

  ins = gst_caps_get_structure (caps, 0);
  outs = gst_caps_get_structure (othercaps, 0);

  from_par = gst_structure_get_value (ins, "pixel-aspect-ratio");
  to_par = gst_structure_get_value (outs, "pixel-aspect-ratio");

  /* If we're fixating from the sinkpad we always set the PAR and
   * assume that missing PAR on the sinkpad means 1/1 and
   * missing PAR on the srcpad means undefined
   */
  if (direction == GST_PAD_SINK) {
    if (!from_par) {
      g_value_init (&fpar, GST_TYPE_FRACTION);
      gst_value_set_fraction (&fpar, 1, 1);
      from_par = &fpar;
    }
    if (!to_par) {
      g_value_init (&tpar, GST_TYPE_FRACTION);
      gst_value_set_fraction (&tpar, 1, 1);
      to_par = &tpar;
    }
  } else {
    if (!to_par) {
      g_value_init (&tpar, GST_TYPE_FRACTION);
      gst_value_set_fraction (&tpar, 1, 1);
      to_par = &tpar;

      gst_structure_set (outs, "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
          NULL);
    }
    if (!from_par) {
      g_value_init (&fpar, GST_TYPE_FRACTION);
      gst_value_set_fraction (&fpar, 1, 1);
      from_par = &fpar;
    }
  }

  /* we have both PARs, but they might not be fixed */
  {
    gint from_w, from_h, from_par_n, from_par_d, to_par_n, to_par_d;
    gint w = 0, h = 0;
    gint from_dar_n, from_dar_d;
    gint num, den;

    /* from_par should be fixed */
    g_return_val_if_fail (gst_value_is_fixed (from_par), othercaps);

    from_par_n = gst_value_get_fraction_numerator (from_par);
    from_par_d = gst_value_get_fraction_denominator (from_par);

    gst_structure_get_int (ins, "width", &from_w);
    gst_structure_get_int (ins, "height", &from_h);

    gst_structure_get_int (outs, "width", &w);
    gst_structure_get_int (outs, "height", &h);

    /* if both width and height are already fixed, we can't do anything
     * about it anymore */
    if (w && h) {
      gint n = 1, d = 1;

      GST_DEBUG_OBJECT (bt, "dimensions already set to %dx%d, not fixating",
          w, h);
      if (!gst_value_is_fixed (to_par)) {
        GST_DEBUG_OBJECT (bt, "fixating to_par to %dx%d", n, d);
        if (gst_structure_has_field (outs, "pixel-aspect-ratio"))
          gst_structure_fixate_field_nearest_fraction (outs,
              "pixel-aspect-ratio", 1, 1);
        else if (n != d)
          gst_structure_set (outs, "pixel-aspect-ratio", GST_TYPE_FRACTION,
              1, 1, NULL);
      }
      goto done;
    }

    /* Calculate input DAR */
    if (!gst_util_fraction_multiply (from_w, from_h, from_par_n, from_par_d,
            &from_dar_n, &from_dar_d)) {
      GST_ELEMENT_ERROR (bt, CORE, NEGOTIATION, (NULL),
          ("Error calculating the output scaled size - integer overflow"));
      goto done;
    }

    GST_DEBUG_OBJECT (bt, "Input DAR is %d/%d", from_dar_n, from_dar_d);

    /* If either width or height is fixed, there's not much we can do
     * either, except choosing a width or height and PAR that matches
     * the DAR as well as possible
     */
    if (h) {
      gint num, den;

      GST_DEBUG_OBJECT (bt, "height is fixed (%d)", h);

      if (!gst_value_is_fixed (to_par)) {
        gst_value_set_fraction (&tpar, 1, 1);
      }

      /* PAR is fixed, choose the width that is nearest to the
       * width with the same DAR */
      to_par_n = gst_value_get_fraction_numerator (to_par);
      to_par_d = gst_value_get_fraction_denominator (to_par);

      GST_DEBUG_OBJECT (bt, "PAR is fixed %d/%d", to_par_n, to_par_d);

      if (!gst_util_fraction_multiply (from_dar_n, from_dar_d, to_par_d,
              to_par_n, &num, &den)) {
        GST_ELEMENT_ERROR (bt, CORE, NEGOTIATION, (NULL),
            ("Error calculating the output scaled size - integer overflow"));
        goto done;
      }

      w = (guint) gst_util_uint64_scale_int (h, num, den);
      gst_structure_fixate_field_nearest_int (outs, "width", w);

      goto done;
    } else if (w) {
      gint num, den;

      GST_DEBUG_OBJECT (bt, "width is fixed (%d)", w);

      if (!gst_value_is_fixed (to_par)) {
        gst_value_set_fraction (&tpar, 1, 1);
      }

      /* PAR is fixed, choose the height that is nearest to the
       * height with the same DAR */
      to_par_n = gst_value_get_fraction_numerator (to_par);
      to_par_d = gst_value_get_fraction_denominator (to_par);

      GST_DEBUG_OBJECT (bt, "PAR is fixed %d/%d", to_par_n, to_par_d);

      if (!gst_util_fraction_multiply (from_dar_n, from_dar_d, to_par_d,
              to_par_n, &num, &den)) {
        GST_ELEMENT_ERROR (bt, CORE, NEGOTIATION, (NULL),
            ("Error calculating the output scaled size - integer overflow"));
        goto done;
      }

      h = (guint) gst_util_uint64_scale_int (w, den, num);
      gst_structure_fixate_field_nearest_int (outs, "height", h);

      goto done;
    } else if (gst_value_is_fixed (to_par)) {
      GstStructure *tmp;
      gint set_h, set_w, f_h, f_w;

      to_par_n = gst_value_get_fraction_numerator (to_par);
      to_par_d = gst_value_get_fraction_denominator (to_par);

      /* Calculate scale factor for the PAR change */
      if (!gst_util_fraction_multiply (from_dar_n, from_dar_d, to_par_n,
              to_par_d, &num, &den)) {
        GST_ELEMENT_ERROR (bt, CORE, NEGOTIATION, (NULL),
            ("Error calculating the output scaled size - integer overflow"));
        goto done;
      }

      /* Try to keep the input height */
      tmp = gst_structure_copy (outs);
      gst_structure_fixate_field_nearest_int (tmp, "height", from_h);
      gst_structure_get_int (tmp, "height", &set_h);

      /* This might have failed but try to scale the width
       * to keep the DAR nonetheless */
      w = (guint) gst_util_uint64_scale_int (set_h, num, den);
      gst_structure_fixate_field_nearest_int (tmp, "width", w);
      gst_structure_get_int (tmp, "width", &set_w);
      gst_structure_free (tmp);

      /* We kept the DAR and the height is nearest to the original height */
      if (set_w == w) {
        gst_structure_set (outs, "width", G_TYPE_INT, set_w, "height",
            G_TYPE_INT, set_h, NULL);
        goto done;
      }

      f_h = set_h;
      f_w = set_w;

      /* If the former failed, try to keep the input width at least */
      tmp = gst_structure_copy (outs);
      gst_structure_fixate_field_nearest_int (tmp, "width", from_w);
      gst_structure_get_int (tmp, "width", &set_w);

      /* This might have failed but try to scale the height
       * to keep the DAR nonetheless */
      h = (guint) gst_util_uint64_scale_int (set_w, den, num);
      gst_structure_fixate_field_nearest_int (tmp, "height", h);
      gst_structure_get_int (tmp, "height", &set_h);
      gst_structure_free (tmp);

      /* We kept the DAR and the width is nearest to the original width */
      if (set_h == h) {
        gst_structure_set (outs, "width", G_TYPE_INT, set_w, "height",
            G_TYPE_INT, set_h, NULL);
        goto done;
      }

      /* If all this failed, keep the height that was nearest to the original
       * height and the nearest possible width. This changes the DAR but
       * there's not much else to do here.
       */
      gst_structure_set (outs, "width", G_TYPE_INT, f_w, "height", G_TYPE_INT,
          f_h, NULL);
      goto done;
    } else {
      GstStructure *tmp;
      gint set_h, set_w, set_par_n, set_par_d, tmp2;

      /* width, height and PAR are not fixed */

      /* First try to keep the height and width as close as possible
       * to the input and scale the PAR */
      tmp = gst_structure_copy (outs);
      gst_structure_fixate_field_nearest_int (tmp, "height", from_h);
      gst_structure_get_int (tmp, "height", &set_h);
      gst_structure_fixate_field_nearest_int (tmp, "width", from_w);
      gst_structure_get_int (tmp, "width", &set_w);

      if (!gst_util_fraction_multiply (from_dar_n, from_dar_d, set_h, set_w,
              &to_par_n, &to_par_d)) {
        GST_ELEMENT_ERROR (bt, CORE, NEGOTIATION, (NULL),
            ("Error calculating the output scaled size - integer overflow"));
        goto done;
      }

      if (!gst_structure_has_field (tmp, "pixel-aspect-ratio"))
        gst_structure_set_value (tmp, "pixel-aspect-ratio", to_par);
      gst_structure_fixate_field_nearest_fraction (tmp, "pixel-aspect-ratio",
          to_par_n, to_par_d);
      gst_structure_get_fraction (tmp, "pixel-aspect-ratio", &set_par_n,
          &set_par_d);
      gst_structure_free (tmp);

      if (set_par_n == to_par_n && set_par_d == to_par_d) {
        gst_structure_set (outs, "width", G_TYPE_INT, set_w, "height",
            G_TYPE_INT, set_h, NULL);

        if (gst_structure_has_field (outs, "pixel-aspect-ratio") ||
            set_par_n != set_par_d)
          gst_structure_set (outs, "pixel-aspect-ratio", GST_TYPE_FRACTION,
              set_par_n, set_par_d, NULL);
        goto done;
      }

      /* Otherwise try to scale width to keep the DAR with the set
       * PAR and height */
      if (!gst_util_fraction_multiply (from_dar_n, from_dar_d, set_par_d,
              set_par_n, &num, &den)) {
        GST_ELEMENT_ERROR (bt, CORE, NEGOTIATION, (NULL),
            ("Error calculating the output scaled size - integer overflow"));
        goto done;
      }

      w = (guint) gst_util_uint64_scale_int (set_h, num, den);
      tmp = gst_structure_copy (outs);
      gst_structure_fixate_field_nearest_int (tmp, "width", w);
      gst_structure_get_int (tmp, "width", &tmp2);
      gst_structure_free (tmp);

      if (tmp2 == w) {
        gst_structure_set (outs, "width", G_TYPE_INT, tmp2, "height",
            G_TYPE_INT, set_h, NULL);
        if (gst_structure_has_field (outs, "pixel-aspect-ratio") ||
            set_par_n != set_par_d)
          gst_structure_set (outs, "pixel-aspect-ratio", GST_TYPE_FRACTION,
              set_par_n, set_par_d, NULL);
        goto done;
      }

      /* ... or try the same with the height */
      h = (guint) gst_util_uint64_scale_int (set_w, den, num);
      tmp = gst_structure_copy (outs);
      gst_structure_fixate_field_nearest_int (tmp, "height", h);
      gst_structure_get_int (tmp, "height", &tmp2);
      gst_structure_free (tmp);

      if (tmp2 == h) {
        gst_structure_set (outs, "width", G_TYPE_INT, set_w, "height",
            G_TYPE_INT, tmp2, NULL);
        if (gst_structure_has_field (outs, "pixel-aspect-ratio") ||
            set_par_n != set_par_d)
          gst_structure_set (outs, "pixel-aspect-ratio", GST_TYPE_FRACTION,
              set_par_n, set_par_d, NULL);
        goto done;
      }

      /* If all of this fails, we can't keep the DAR; take the nearest
       * values for everything from the first try */
      gst_structure_set (outs, "width", G_TYPE_INT, set_w, "height",
          G_TYPE_INT, set_h, NULL);
      if (gst_structure_has_field (outs, "pixel-aspect-ratio") ||
          set_par_n != set_par_d)
        gst_structure_set (outs, "pixel-aspect-ratio", GST_TYPE_FRACTION,
            set_par_n, set_par_d, NULL);
    }
  }


done:
  othercaps = gst_caps_fixate (othercaps);

  GST_DEBUG_OBJECT (bt, "fixated othercaps to %" GST_PTR_FORMAT, othercaps);

  if (from_par == &fpar)
    g_value_unset (&fpar);
  if (to_par == &tpar)
    g_value_unset (&tpar);

  return othercaps;
}
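
All the branches above rest on the same arithmetic: the display aspect ratio is (width/height) × PAR, computed with gst_util_fraction_multiply() so it stays an exact reduced fraction. A small standalone sketch with illustrative numbers (720×576 with a 16/15 pixel aspect ratio):

/* Worked example of the DAR arithmetic used in the fixate code above:
 * DAR = (width / height) * PAR, kept as an exact reduced fraction.
 * Build with: gcc dar.c $(pkg-config --cflags --libs gstreamer-1.0) */
#include <gst/gst.h>

int
main (int argc, char **argv)
{
  gint dar_n, dar_d;

  gst_init (&argc, &argv);

  /* 720x576 with a 16/15 pixel aspect ratio:
   * (720/576) * (16/15) = 11520/8640, reduced to 4/3 */
  if (!gst_util_fraction_multiply (720, 576, 16, 15, &dar_n, &dar_d)) {
    g_printerr ("integer overflow\n");
    return 1;
  }

  g_print ("DAR = %d/%d\n", dar_n, dar_d);      /* prints: DAR = 4/3 */
  return 0;
}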