Example #1
gboolean
gst_niimaqsrc_set_caps (GstBaseSrc * bsrc, GstCaps * caps)
{
  GstNiImaqSrc *src = GST_NIIMAQSRC (bsrc);
  gboolean res = TRUE;
  int depth, ncomps;
  GstVideoInfo vinfo;

  res = gst_video_info_from_caps (&vinfo, caps);
  if (!res) {
    GST_WARNING_OBJECT (src, "Unable to parse video info from caps");
    return res;
  }
  src->format = GST_VIDEO_INFO_FORMAT (&vinfo);
  src->width = GST_VIDEO_INFO_WIDTH (&vinfo);
  src->height = GST_VIDEO_INFO_HEIGHT (&vinfo);

  /* this will handle byte alignment (i.e. row multiple of 4 bytes) */
  src->framesize = GST_VIDEO_INFO_SIZE (&vinfo);

  gst_base_src_set_blocksize (bsrc, src->framesize);

  ncomps = GST_VIDEO_INFO_N_COMPONENTS (&vinfo);
  depth = GST_VIDEO_INFO_COMP_DEPTH (&vinfo, 0);

  /* use this so NI can give us proper byte alignment */
  src->rowpixels =
      GST_VIDEO_INFO_COMP_STRIDE (&vinfo, 0) / (ncomps * depth / 8);

  GST_LOG_OBJECT (src, "Caps set, framesize=%d, rowpixels=%d",
      src->framesize, src->rowpixels);

  return res;
}
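
A quick way to sanity-check the rowpixels arithmetic above is a self-contained sketch (assuming GStreamer 1.x; GRAY8 at 659x480 is an arbitrary choice for illustration):

#include <gst/video/video.h>

static gint
rowpixels_sketch (void)
{
  GstVideoInfo vinfo;
  gint ncomps, depth;

  gst_video_info_set_format (&vinfo, GST_VIDEO_FORMAT_GRAY8, 659, 480);

  ncomps = GST_VIDEO_INFO_N_COMPONENTS (&vinfo);        /* 1 */
  depth = GST_VIDEO_INFO_COMP_DEPTH (&vinfo, 0);        /* 8 */

  /* GRAY8 rows are padded to a 4-byte multiple, so the stride is 660
   * and the padded width in pixels is 660 / (1 * 8 / 8) = 660 */
  return GST_VIDEO_INFO_COMP_STRIDE (&vinfo, 0) / (ncomps * depth / 8);
}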
Example #2
/* based on upstream gst-plugins-good jpegencoder */
static void
generate_sampling_factors (GstVaapiEncoderJpeg * encoder)
{
  GstVideoInfo *vinfo;
  gint i;

  vinfo = GST_VAAPI_ENCODER_VIDEO_INFO (encoder);

  if (GST_VIDEO_INFO_FORMAT (vinfo) == GST_VIDEO_FORMAT_ENCODED) {
    /* Use native I420 format */
    encoder->n_components = 3;
    for (i = 0; i < encoder->n_components; ++i) {
      if (i == 0)
        encoder->h_samp[i] = encoder->v_samp[i] = 2;
      else
        encoder->h_samp[i] = encoder->v_samp[i] = 1;
      GST_DEBUG ("sampling factors: %d %d", encoder->h_samp[i],
          encoder->v_samp[i]);
    }
    return;
  }

  encoder->n_components = GST_VIDEO_INFO_N_COMPONENTS (vinfo);

  encoder->h_max_samp = 0;
  encoder->v_max_samp = 0;
  for (i = 0; i < encoder->n_components; ++i) {
    encoder->cwidth[i] = GST_VIDEO_INFO_COMP_WIDTH (vinfo, i);
    encoder->cheight[i] = GST_VIDEO_INFO_COMP_HEIGHT (vinfo, i);
    encoder->h_samp[i] =
        GST_ROUND_UP_4 (GST_VIDEO_INFO_WIDTH (vinfo)) / encoder->cwidth[i];
    encoder->h_max_samp = MAX (encoder->h_max_samp, encoder->h_samp[i]);
    encoder->v_samp[i] =
        GST_ROUND_UP_4 (GST_VIDEO_INFO_HEIGHT (vinfo)) / encoder->cheight[i];
    encoder->v_max_samp = MAX (encoder->v_max_samp, encoder->v_samp[i]);
  }
  /* samp should only be 1, 2 or 4 */
  g_assert (encoder->h_max_samp <= 4);
  g_assert (encoder->v_max_samp <= 4);

  /* now invert */
  /* maximum is invariant, as one of the components should have samp 1 */
  for (i = 0; i < encoder->n_components; ++i) {
    encoder->h_samp[i] = encoder->h_max_samp / encoder->h_samp[i];
    encoder->v_samp[i] = encoder->v_max_samp / encoder->v_samp[i];
    GST_DEBUG ("sampling factors: %d %d", encoder->h_samp[i],
        encoder->v_samp[i]);
  }
}
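
To make the "invert" step concrete, consider 4:2:0 I420 at 640x480 (a worked example, not taken from the code above): the component widths are 640, 320 and 320, so the first loop yields h_samp = {1, 2, 2} and h_max_samp = 2; after inversion h_samp becomes {2, 1, 1} (v_samp likewise), which is the usual JPEG convention where luma carries the largest sampling factor and the maximum stays invariant.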
Example #3
gboolean
gst_opencv_parse_iplimage_params_from_caps (GstCaps * caps, gint * width,
    gint * height, gint * ipldepth, gint * channels, GError ** err)
{
  GstVideoInfo info;
  gint i, depth = 0;

  if (!gst_video_info_from_caps (&info, caps)) {
    GST_ERROR ("Failed to get the videoinfo from caps");
    g_set_error (err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "No width/heighti/depth/channels in caps");
    return FALSE;
  }

  *width = GST_VIDEO_INFO_WIDTH (&info);
  *height = GST_VIDEO_INFO_HEIGHT (&info);
  if (GST_VIDEO_INFO_IS_RGB (&info))
    *channels = 3;
  else if (GST_VIDEO_INFO_IS_GRAY (&info))
    *channels = 1;
  else {
    gchar *caps_str = gst_caps_to_string (caps);

    g_set_error (err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported caps %s", caps_str);
    g_free (caps_str);
    return FALSE;
  }

  for (i = 0; i < GST_VIDEO_INFO_N_COMPONENTS (&info); i++)
    depth += GST_VIDEO_INFO_COMP_DEPTH (&info, i);

  if (depth / *channels == 8) {
    /* TODO signdness? */
    *ipldepth = IPL_DEPTH_8U;
  } else if (depth / *channels == 16) {
    *ipldepth = IPL_DEPTH_16U;
  } else {
    g_set_error (err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported depth/channels %d/%d", depth, *channels);
    return FALSE;
  }
  return TRUE;
}
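
A hedged usage sketch for the helper above (the caps values are arbitrary and only for illustration):

GstCaps *caps;
gint width, height, ipldepth, channels;
GError *err = NULL;

caps = gst_caps_new_simple ("video/x-raw",
    "format", G_TYPE_STRING, "RGB",
    "width", G_TYPE_INT, 640, "height", G_TYPE_INT, 480,
    "framerate", GST_TYPE_FRACTION, 30, 1, NULL);

if (gst_opencv_parse_iplimage_params_from_caps (caps, &width, &height,
        &ipldepth, &channels, &err)) {
  /* expect 640x480, IPL_DEPTH_8U, 3 channels for 8-bit RGB */
  g_print ("%dx%d, ipldepth %d, %d channels\n", width, height, ipldepth,
      channels);
} else {
  g_printerr ("parse failed: %s\n", err->message);
  g_clear_error (&err);
}
gst_caps_unref (caps);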
Example #4
static gdouble
gst_compare_ssim (GstCompare * comp, GstBuffer * buf1, GstCaps * caps1,
    GstBuffer * buf2, GstCaps * caps2)
{
  GstVideoInfo info1, info2;
  GstVideoFrame frame1, frame2;
  gint i, comps;
  /* zero-init cssim so the debug dump below never reads uninitialized values */
  gdouble cssim[4] = { 0., }, ssim, c[4] = { 1.0, 0.0, 0.0, 0.0 };

  if (!caps1)
    goto invalid_input;

  if (!gst_video_info_from_caps (&info1, caps1))
    goto invalid_input;

  if (!caps2)
    goto invalid_input;

  if (!gst_video_info_from_caps (&info2, caps2))
    goto invalid_input;

  if (GST_VIDEO_INFO_FORMAT (&info1) != GST_VIDEO_INFO_FORMAT (&info2) ||
      GST_VIDEO_INFO_WIDTH (&info1) != GST_VIDEO_INFO_WIDTH (&info2) ||
      GST_VIDEO_INFO_HEIGHT (&info1) != GST_VIDEO_INFO_HEIGHT (&info2))
    return comp->threshold + 1;

  comps = GST_VIDEO_INFO_N_COMPONENTS (&info1);
  /* note that some are reported both yuv and gray */
  for (i = 0; i < comps; ++i)
    c[i] = 1.0;
  /* increase luma weight if yuv */
  if (GST_VIDEO_INFO_IS_YUV (&info1) && (comps > 1))
    c[0] = comps - 1;
  for (i = 0; i < comps; ++i)
    c[i] /= (GST_VIDEO_INFO_IS_YUV (&info1) && (comps > 1)) ?
        2 * (comps - 1) : comps;

  gst_video_frame_map (&frame1, &info1, buf1, GST_MAP_READ);
  gst_video_frame_map (&frame2, &info2, buf2, GST_MAP_READ);

  for (i = 0; i < comps; i++) {
    gint cw, ch, step, stride;

    /* only support most common formats */
    if (GST_VIDEO_INFO_COMP_DEPTH (&info1, i) != 8)
      goto unsupported_input;
    cw = GST_VIDEO_FRAME_COMP_WIDTH (&frame1, i);
    ch = GST_VIDEO_FRAME_COMP_HEIGHT (&frame1, i);
    step = GST_VIDEO_FRAME_COMP_PSTRIDE (&frame1, i);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&frame1, i);

    GST_LOG_OBJECT (comp, "component %d", i);
    cssim[i] = gst_compare_ssim_component (comp,
        GST_VIDEO_FRAME_COMP_DATA (&frame1, i),
        GST_VIDEO_FRAME_COMP_DATA (&frame2, i), cw, ch, step, stride);
    GST_LOG_OBJECT (comp, "ssim[%d] = %f", i, cssim[i]);
  }

  gst_video_frame_unmap (&frame1);
  gst_video_frame_unmap (&frame2);

#ifndef GST_DISABLE_GST_DEBUG
  for (i = 0; i < 4; i++) {
    GST_DEBUG_OBJECT (comp, "ssim[%d] = %f, c[%d] = %f", i, cssim[i], i, c[i]);
  }
#endif

  ssim = cssim[0] * c[0] + cssim[1] * c[1] + cssim[2] * c[2] + cssim[3] * c[3];

  return ssim;

  /* ERRORS */
invalid_input:
  {
    GST_ERROR_OBJECT (comp, "ssim method needs raw video input");
    return 0;
  }
unsupported_input:
  {
    GST_ERROR_OBJECT (comp, "raw video format not supported %" GST_PTR_FORMAT,
        caps1);
    /* the frames were mapped above; release them before bailing out */
    gst_video_frame_unmap (&frame1);
    gst_video_frame_unmap (&frame2);
    return 0;
  }
}
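
Spelling out the component weights above for 3-component YUV: c starts as {1, 1, 1}, the luma weight is raised to comps - 1 = 2, and every entry is then divided by 2 * (comps - 1) = 4, giving 0.5 for luma and 0.25 per chroma plane. The weights always sum to 1, so the combined ssim stays on the same scale as the per-component scores.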
Example #5
static GstFlowReturn
gst_ffmpegvidenc_handle_frame (GstVideoEncoder * encoder,
    GstVideoCodecFrame * frame)
{
  GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
  GstBuffer *outbuf;
  gint ret_size = 0, c;
  GstVideoInfo *info = &ffmpegenc->input_state->info;
  GstVideoFrame vframe;

  if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame))
    ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;

  if (!gst_video_frame_map (&vframe, info, frame->input_buffer, GST_MAP_READ)) {
    GST_ERROR_OBJECT (encoder, "Failed to map input buffer");
    return GST_FLOW_ERROR;
  }

  /* Fill avpicture */
  for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
    if (c < GST_VIDEO_INFO_N_COMPONENTS (info)) {
      ffmpegenc->picture->data[c] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, c);
      ffmpegenc->picture->linesize[c] =
          GST_VIDEO_FRAME_COMP_STRIDE (&vframe, c);
    } else {
      ffmpegenc->picture->data[c] = NULL;
      ffmpegenc->picture->linesize[c] = 0;
    }
  }

  ffmpegenc->picture->pts =
      gst_ffmpeg_time_gst_to_ff (frame->pts /
      ffmpegenc->context->ticks_per_frame, ffmpegenc->context->time_base);

  ffmpegenc_setup_working_buf (ffmpegenc);

  ret_size = avcodec_encode_video (ffmpegenc->context,
      ffmpegenc->working_buf, ffmpegenc->working_buf_size, ffmpegenc->picture);

  gst_video_frame_unmap (&vframe);

  if (ret_size < 0)
    goto encode_fail;

  /* Encoder needs more data */
  if (!ret_size)
    return GST_FLOW_OK;

  /* save stats info if there is some as well as a stats file */
  if (ffmpegenc->file && ffmpegenc->context->stats_out)
    if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
      GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
          (("Could not write to file \"%s\"."), ffmpegenc->filename),
          GST_ERROR_SYSTEM);

  gst_video_codec_frame_unref (frame);

  /* Get oldest frame */
  frame = gst_video_encoder_get_oldest_frame (encoder);

  /* Allocate output buffer */
  if (gst_video_encoder_allocate_output_frame (encoder, frame,
          ret_size) != GST_FLOW_OK) {
    gst_video_codec_frame_unref (frame);
    goto alloc_fail;
  }

  outbuf = frame->output_buffer;
  gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);

  /* buggy codec may not set coded_frame */
  if (ffmpegenc->context->coded_frame) {
    if (ffmpegenc->context->coded_frame->key_frame)
      GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
  } else
    GST_WARNING_OBJECT (ffmpegenc, "codec did not provide keyframe info");

  /* Reset frame type */
  if (ffmpegenc->picture->pict_type)
    ffmpegenc->picture->pict_type = 0;

  return gst_video_encoder_finish_frame (encoder, frame);

  /* ERRORS */
encode_fail:
  {
#ifndef GST_DISABLE_GST_DEBUG
    GstFFMpegVidEncClass *oclass =
        (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
    GST_ERROR_OBJECT (ffmpegenc,
        "avenc_%s: failed to encode buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
    return GST_FLOW_OK;
  }
alloc_fail:
  {
#ifndef GST_DISABLE_GST_DEBUG
    GstFFMpegVidEncClass *oclass =
        (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
    GST_ERROR_OBJECT (ffmpegenc,
        "avenc_%s: failed to allocate buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
    return GST_FLOW_ERROR;
  }
}
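
The pts handling above leans on gst_ffmpeg_time_gst_to_ff from gst-libav's utility code. As a sketch of what such a conversion has to do (an assumption based on the name, not the verbatim upstream code), it rescales a nanosecond GstClockTime into codec time_base ticks:

static gint64
time_gst_to_ff_sketch (GstClockTime time, AVRational base)
{
  if (!GST_CLOCK_TIME_IS_VALID (time))
    return AV_NOPTS_VALUE;
  /* seconds = time / GST_SECOND; ticks = seconds * base.den / base.num */
  return gst_util_uint64_scale (time, base.den, base.num * GST_SECOND);
}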
Example #6
static GstFlowReturn
gst_ffmpegvidenc_handle_frame (GstVideoEncoder * encoder,
                               GstVideoCodecFrame * frame)
{
    GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
    GstBuffer *outbuf;
    gint ret = 0, c;
    GstVideoInfo *info = &ffmpegenc->input_state->info;
    AVPacket *pkt;
    int have_data = 0;
    BufferInfo *buffer_info;

    if (ffmpegenc->interlaced) {
        ffmpegenc->picture->interlaced_frame = TRUE;
        /* if this is not the case, a filter element should be used to swap fields */
        ffmpegenc->picture->top_field_first =
            GST_BUFFER_FLAG_IS_SET (frame->input_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
    }

    if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame))
        ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;

    buffer_info = g_slice_new0 (BufferInfo);
    buffer_info->buffer = gst_buffer_ref (frame->input_buffer);

    if (!gst_video_frame_map (&buffer_info->vframe, info, frame->input_buffer,
                              GST_MAP_READ)) {
        GST_ERROR_OBJECT (encoder, "Failed to map input buffer");
        gst_buffer_unref (buffer_info->buffer);
        g_slice_free (BufferInfo, buffer_info);
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_ERROR;
    }

    /* Fill avpicture */
    ffmpegenc->picture->buf[0] =
        av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);
    for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
        if (c < GST_VIDEO_INFO_N_COMPONENTS (info)) {
            ffmpegenc->picture->data[c] =
                GST_VIDEO_FRAME_PLANE_DATA (&buffer_info->vframe, c);
            ffmpegenc->picture->linesize[c] =
                GST_VIDEO_FRAME_COMP_STRIDE (&buffer_info->vframe, c);
        } else {
            ffmpegenc->picture->data[c] = NULL;
            ffmpegenc->picture->linesize[c] = 0;
        }
    }

    ffmpegenc->picture->format = ffmpegenc->context->pix_fmt;
    ffmpegenc->picture->width = GST_VIDEO_FRAME_WIDTH (&buffer_info->vframe);
    ffmpegenc->picture->height = GST_VIDEO_FRAME_HEIGHT (&buffer_info->vframe);

    ffmpegenc->picture->pts =
        gst_ffmpeg_time_gst_to_ff (frame->pts /
                                   ffmpegenc->context->ticks_per_frame, ffmpegenc->context->time_base);

    have_data = 0;
    pkt = g_slice_new0 (AVPacket);

    ret =
        avcodec_encode_video2 (ffmpegenc->context, pkt, ffmpegenc->picture,
                               &have_data);

    av_frame_unref (ffmpegenc->picture);

    if (ret < 0 || !have_data)
        g_slice_free (AVPacket, pkt);

    if (ret < 0)
        goto encode_fail;

    /* Encoder needs more data */
    if (!have_data) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_OK;
    }

    /* save stats info if there is some as well as a stats file */
    if (ffmpegenc->file && ffmpegenc->context->stats_out)
        if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
            GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
                               (("Could not write to file \"%s\"."), ffmpegenc->filename),
                               GST_ERROR_SYSTEM);

    gst_video_codec_frame_unref (frame);

    /* Get oldest frame */
    frame = gst_video_encoder_get_oldest_frame (encoder);

    outbuf =
        gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                     pkt->size, 0, pkt->size, pkt, gst_ffmpegvidenc_free_avpacket);
    frame->output_buffer = outbuf;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
    else
        GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

    return gst_video_encoder_finish_frame (encoder, frame);

    /* ERRORS */
encode_fail:
    {
#ifndef GST_DISABLE_GST_DEBUG
        GstFFMpegVidEncClass *oclass =
            (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
        GST_ERROR_OBJECT (ffmpegenc,
                          "avenc_%s: failed to encode buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
        /* avoid frame (and ts etc) piling up */
        return gst_video_encoder_finish_frame (encoder, frame);
    }
}
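
Example #6 assumes two helpers defined elsewhere in gstavvidenc.c: a BufferInfo pair that keeps the mapped input frame alive for as long as libav holds the AVBufferRef created above, and a destructor for the AVPacket wrapped into the output buffer. A plausible sketch of both (an assumption, not the verbatim upstream definitions):

typedef struct
{
  GstBuffer *buffer;
  GstVideoFrame vframe;
} BufferInfo;

/* called by libav when the last reference to the AVBufferRef is released */
static void
buffer_info_free (void *opaque, guint8 * data)
{
  BufferInfo *info = opaque;

  gst_video_frame_unmap (&info->vframe);
  gst_buffer_unref (info->buffer);
  g_slice_free (BufferInfo, info);
}

/* called when the wrapping GstBuffer is finalized */
static void
gst_ffmpegvidenc_free_avpacket (gpointer pkt)
{
  av_packet_unref ((AVPacket *) pkt);
  g_slice_free (AVPacket, pkt);
}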