Code Example #1
static GstFlowReturn
gst_pixbufscale_transform_frame (GstVideoFilter * filter,
    GstVideoFrame * in, GstVideoFrame * out)
{
  GstPixbufScale *scale;
  GdkPixbuf *src_pixbuf, *dest_pixbuf;

  scale = GST_PIXBUFSCALE (filter);

  src_pixbuf =
      gdk_pixbuf_new_from_data (GST_VIDEO_FRAME_COMP_DATA (in, 0),
      GDK_COLORSPACE_RGB, FALSE, 8, GST_VIDEO_FRAME_WIDTH (in),
      GST_VIDEO_FRAME_HEIGHT (in),
      GST_VIDEO_FRAME_COMP_STRIDE (in, 0), NULL, NULL);

  dest_pixbuf =
      gdk_pixbuf_new_from_data (GST_VIDEO_FRAME_COMP_DATA (out, 0),
      GDK_COLORSPACE_RGB, FALSE, 8, GST_VIDEO_FRAME_WIDTH (out),
      GST_VIDEO_FRAME_HEIGHT (out),
      GST_VIDEO_FRAME_COMP_STRIDE (out, 0), NULL, NULL);

  gdk_pixbuf_scale (src_pixbuf, dest_pixbuf, 0, 0,
      GST_VIDEO_FRAME_WIDTH (out),
      GST_VIDEO_FRAME_HEIGHT (out), 0, 0,
      (double) GST_VIDEO_FRAME_WIDTH (out) / GST_VIDEO_FRAME_WIDTH (in),
      (double) GST_VIDEO_FRAME_HEIGHT (out) / GST_VIDEO_FRAME_HEIGHT (in),
      scale->gdk_method);

  g_object_unref (src_pixbuf);
  g_object_unref (dest_pixbuf);

  return GST_FLOW_OK;
}
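
Note: every example on this page follows the same basic pattern: map a GstBuffer into a GstVideoFrame, read the per-component pointer and stride through the GST_VIDEO_FRAME_COMP_DATA / GST_VIDEO_FRAME_COMP_STRIDE macros, and unmap when done. The sketch below is not taken from any of the listed projects; the I420 format, the 640x480 size and the dump_average_luma name are illustrative assumptions only.

#include <gst/video/video.h>

/* Minimal sketch (hypothetical helper): map a buffer as I420, walk the
 * luma component row by row using the component stride, then unmap. */
static void
dump_average_luma (GstBuffer * buffer)
{
  GstVideoInfo info;
  GstVideoFrame frame;
  const guint8 *data;
  gint stride, width, height, i, j;
  guint64 sum = 0;

  /* assumed caps for the illustration: I420, 640x480 */
  gst_video_info_init (&info);
  gst_video_info_set_format (&info, GST_VIDEO_FORMAT_I420, 640, 480);

  if (!gst_video_frame_map (&frame, &info, buffer, GST_MAP_READ))
    return;

  data = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);       /* component pointer */
  stride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);   /* bytes per row */
  width = GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0);
  height = GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0);

  for (i = 0; i < height; i++)
    for (j = 0; j < width; j++)
      sum += data[i * stride + j];      /* rows are stride bytes apart */

  g_print ("average luma: %" G_GUINT64_FORMAT "\n", sum / (width * height));

  gst_video_frame_unmap (&frame);
}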
Code Example #2
File: GStreamerReader.cpp Project: msliu/gecko-dev
void GStreamerReader::ImageDataFromVideoFrame(GstVideoFrame *aFrame,
                                              PlanarYCbCrImage::Data *aData)
{
  NS_ASSERTION(GST_VIDEO_INFO_IS_YUV(&mVideoInfo),
               "Non-YUV video frame formats not supported");
  NS_ASSERTION(GST_VIDEO_FRAME_N_COMPONENTS(aFrame) == 3,
               "Unsupported number of components in video frame");

  aData->mPicX = aData->mPicY = 0;
  aData->mPicSize = gfx::IntSize(mPicture.width, mPicture.height);
  aData->mStereoMode = StereoMode::MONO;

  aData->mYChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 0);
  aData->mYStride = GST_VIDEO_FRAME_COMP_STRIDE(aFrame, 0);
  aData->mYSize = gfx::IntSize(GST_VIDEO_FRAME_COMP_WIDTH(aFrame, 0),
                          GST_VIDEO_FRAME_COMP_HEIGHT(aFrame, 0));
  aData->mYSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 0) - 1;
  aData->mCbCrStride = GST_VIDEO_FRAME_COMP_STRIDE(aFrame, 1);
  aData->mCbCrSize = gfx::IntSize(GST_VIDEO_FRAME_COMP_WIDTH(aFrame, 1),
                             GST_VIDEO_FRAME_COMP_HEIGHT(aFrame, 1));
  aData->mCbChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 1);
  aData->mCrChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 2);
  aData->mCbSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 1) - 1;
  aData->mCrSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 2) - 1;
}
Code Example #3
static GdkPixbuf *
gst_gdk_pixbuf_sink_get_pixbuf_from_buffer (GstGdkPixbufSink * sink,
    GstBuffer * buf)
{
  GdkPixbuf *pix = NULL;
  GstVideoFrame *frame;
  gint minsize, bytes_per_pixel;

  g_return_val_if_fail (GST_VIDEO_SINK_WIDTH (sink) > 0, NULL);
  g_return_val_if_fail (GST_VIDEO_SINK_HEIGHT (sink) > 0, NULL);

  frame = g_slice_new0 (GstVideoFrame);
  gst_video_frame_map (frame, &sink->info, buf, GST_MAP_READ);

  bytes_per_pixel = (sink->has_alpha) ? 4 : 3;

  /* last row needn't have row padding */
  minsize = (GST_VIDEO_FRAME_COMP_STRIDE (frame, 0) *
      (GST_VIDEO_SINK_HEIGHT (sink) - 1)) +
      (bytes_per_pixel * GST_VIDEO_SINK_WIDTH (sink));

  g_return_val_if_fail (gst_buffer_get_size (buf) >= minsize, NULL);

  gst_buffer_ref (buf);
  pix = gdk_pixbuf_new_from_data (GST_VIDEO_FRAME_COMP_DATA (frame, 0),
      GDK_COLORSPACE_RGB, sink->has_alpha, 8, GST_VIDEO_SINK_WIDTH (sink),
      GST_VIDEO_SINK_HEIGHT (sink), GST_VIDEO_FRAME_COMP_STRIDE (frame, 0),
      (GdkPixbufDestroyNotify) gst_gdk_pixbuf_sink_pixbuf_destroy_notify,
      frame);

  return pix;
}
Code Example #4
File: gstsmpte.c Project: PeterXu/gst-mobile
static void
gst_smpte_blend_i420 (GstVideoFrame * frame1, GstVideoFrame * frame2,
    GstVideoFrame * oframe, GstMask * mask, gint border, gint pos)
{
  guint32 *maskp;
  gint value;
  gint i, j;
  gint min, max;
  guint8 *in1, *in2, *out, *in1u, *in1v, *in2u, *in2v, *outu, *outv;
  gint width, height;

  if (border == 0)
    border++;

  min = pos - border;
  max = pos;

  width = GST_VIDEO_FRAME_WIDTH (frame1);
  height = GST_VIDEO_FRAME_HEIGHT (frame1);

  in1 = GST_VIDEO_FRAME_COMP_DATA (frame1, 0);
  in2 = GST_VIDEO_FRAME_COMP_DATA (frame2, 0);
  out = GST_VIDEO_FRAME_COMP_DATA (oframe, 0);

  in1u = GST_VIDEO_FRAME_COMP_DATA (frame1, 1);
  in1v = GST_VIDEO_FRAME_COMP_DATA (frame1, 2);
  in2u = GST_VIDEO_FRAME_COMP_DATA (frame2, 1);
  in2v = GST_VIDEO_FRAME_COMP_DATA (frame2, 2);
  outu = GST_VIDEO_FRAME_COMP_DATA (oframe, 1);
  outv = GST_VIDEO_FRAME_COMP_DATA (oframe, 2);

  maskp = mask->data;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      value = *maskp++;
      value = ((CLAMP (value, min, max) - min) << 8) / border;

      out[j] = ((in1[j] * value) + (in2[j] * (256 - value))) >> 8;
      if (!(i & 1) && !(j & 1)) {
        outu[j / 2] =
            ((in1u[j / 2] * value) + (in2u[j / 2] * (256 - value))) >> 8;
        outv[j / 2] =
            ((in1v[j / 2] * value) + (in2v[j / 2] * (256 - value))) >> 8;
      }
    }

    in1 += GST_VIDEO_FRAME_COMP_STRIDE (frame1, 0);
    in2 += GST_VIDEO_FRAME_COMP_STRIDE (frame2, 0);
    out += GST_VIDEO_FRAME_COMP_STRIDE (oframe, 0);

    if (!(i & 1)) {
      in1u += GST_VIDEO_FRAME_COMP_STRIDE (frame1, 1);
      in2u += GST_VIDEO_FRAME_COMP_STRIDE (frame2, 1);
      in1v += GST_VIDEO_FRAME_COMP_STRIDE (frame1, 2);
      in2v += GST_VIDEO_FRAME_COMP_STRIDE (frame2, 2);
      outu += GST_VIDEO_FRAME_COMP_STRIDE (oframe, 1);
      outv += GST_VIDEO_FRAME_COMP_STRIDE (oframe, 2);
    }
  }
}
Code Example #5
File: gstav1enc.c Project: 0p1pp1/gst-plugins-bad
static void
gst_av1_enc_fill_image (GstAV1Enc * enc, GstVideoFrame * frame,
    aom_image_t * image)
{
  image->planes[AOM_PLANE_Y] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  image->planes[AOM_PLANE_U] = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  image->planes[AOM_PLANE_V] = GST_VIDEO_FRAME_COMP_DATA (frame, 2);

  image->stride[AOM_PLANE_Y] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  image->stride[AOM_PLANE_U] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
  image->stride[AOM_PLANE_V] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
}
Code Example #6
static void
user_endrow_callback (png_structp png_ptr, png_bytep new_row,
    png_uint_32 row_num, int pass)
{
  GstPngDec *pngdec = NULL;

  pngdec = GST_PNGDEC (png_get_io_ptr (png_ptr));

  /* FIXME: implement interlaced pictures */

  /* If buffer_out doesn't exist, it means buffer_alloc failed, which 
   * will already have set the return code */
  if (GST_IS_BUFFER (pngdec->current_frame->output_buffer)) {
    GstVideoFrame frame;
    GstBuffer *buffer = pngdec->current_frame->output_buffer;
    size_t offset;
    gint width;
    guint8 *data;

    if (!gst_video_frame_map (&frame, &pngdec->output_state->info, buffer,
            GST_MAP_WRITE)) {
      pngdec->ret = GST_FLOW_ERROR;
      return;
    }

    data = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
    offset = row_num * GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
    GST_LOG ("got row %u, copying in buffer %p at offset %" G_GSIZE_FORMAT,
        (guint) row_num, pngdec->current_frame->output_buffer, offset);
    width = GST_ROUND_UP_4 (png_get_rowbytes (pngdec->png, pngdec->info));
    memcpy (data + offset, new_row, width);
    gst_video_frame_unmap (&frame);
    pngdec->ret = GST_FLOW_OK;
  }
}
Code Example #7
static void
theora_enc_init_buffer (th_ycbcr_buffer buf, GstVideoFrame * frame)
{
    GstVideoInfo vinfo;
    guint i;

    /* According to Theora developer Timothy Terriberry, the Theora
     * encoder will not use memory outside of pic_width/height, even when
     * the frame size is bigger. The values outside this region will be encoded
     * to default values.
     * Due to this, setting the frame's width/height as the buffer width/height
     * is perfectly ok, even though it does not strictly look ok.
     */

    gst_video_info_init (&vinfo);
    gst_video_info_set_format (&vinfo, GST_VIDEO_FRAME_FORMAT (frame),
                               GST_ROUND_UP_16 (GST_VIDEO_FRAME_WIDTH (frame)),
                               GST_ROUND_UP_16 (GST_VIDEO_FRAME_HEIGHT (frame)));

    for (i = 0; i < 3; i++) {
        buf[i].width = GST_VIDEO_INFO_COMP_WIDTH (&vinfo, i);
        buf[i].height = GST_VIDEO_INFO_COMP_HEIGHT (&vinfo, i);
        buf[i].data = GST_VIDEO_FRAME_COMP_DATA (frame, i);
        buf[i].stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, i);
    }
}
Code Example #8
static void
fill_frame_planar16_3 (GstVideoFrame * frame, opj_image_t * image)
{
  gint c, x, y, w, h;
  guint16 *data_out, *tmp;
  const gint *data_in;
  gint dstride;
  gint shift;

  for (c = 0; c < 3; c++) {
    w = GST_VIDEO_FRAME_COMP_WIDTH (frame, c);
    h = GST_VIDEO_FRAME_COMP_HEIGHT (frame, c);
    dstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, c) / 2;
    data_out = (guint16 *) GST_VIDEO_FRAME_COMP_DATA (frame, c);
    data_in = image->comps[c].data;
    shift = 16 - image->comps[c].prec;

    for (y = 0; y < h; y++) {
      tmp = data_out;

      for (x = 0; x < w; x++) {
        *tmp = *data_in << shift;
        tmp++;
        data_in++;
      }
      data_out += dstride;
    }
  }
}
Code Example #9
File: gstpngdec.c Project: DylanZA/gst-plugins-good
static void
user_endrow_callback (png_structp png_ptr, png_bytep new_row,
    png_uint_32 row_num, int pass)
{
  GstPngDec *pngdec = NULL;

  pngdec = GST_PNGDEC (png_get_io_ptr (png_ptr));

  /* If buffer_out doesn't exist, it means buffer_alloc failed, which 
   * will already have set the return code */
  if (new_row && GST_IS_BUFFER (pngdec->current_frame->output_buffer)) {
    GstVideoFrame frame;
    GstBuffer *buffer = pngdec->current_frame->output_buffer;
    size_t offset;
    guint8 *data;

    if (!gst_video_frame_map (&frame, &pngdec->output_state->info, buffer,
            GST_MAP_WRITE)) {
      pngdec->ret = GST_FLOW_ERROR;
      return;
    }

    data = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
    offset = row_num * GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
    GST_LOG ("got row %u at pass %d, copying in buffer %p at offset %"
        G_GSIZE_FORMAT, (guint) row_num, pass,
        pngdec->current_frame->output_buffer, offset);
    png_progressive_combine_row (pngdec->png, data + offset, new_row);
    gst_video_frame_unmap (&frame);
    pngdec->ret = GST_FLOW_OK;
  } else
    pngdec->ret = GST_FLOW_OK;
}
Code Example #10
File: gstx265enc.c Project: mbouron/gst-plugins-bad
/* chain function
 * this function does the actual processing
 */
static GstFlowReturn
gst_x265_enc_handle_frame (GstVideoEncoder * video_enc,
    GstVideoCodecFrame * frame)
{
  GstX265Enc *encoder = GST_X265_ENC (video_enc);
  GstVideoInfo *info = &encoder->input_state->info;
  GstFlowReturn ret;
  x265_picture pic_in;
  guint32 i_nal, i;
  FrameData *fdata;
  gint nplanes = 0;

  if (G_UNLIKELY (encoder->x265enc == NULL))
    goto not_inited;

  /* set up input picture */
  x265_picture_init (&encoder->x265param, &pic_in);

  fdata = gst_x265_enc_queue_frame (encoder, frame, info);
  if (!fdata)
    goto invalid_frame;

  pic_in.colorSpace =
      gst_x265_enc_gst_to_x265_video_format (info->finfo->format, &nplanes);
  for (i = 0; i < nplanes; i++) {
    pic_in.planes[i] = GST_VIDEO_FRAME_PLANE_DATA (&fdata->vframe, i);
    pic_in.stride[i] = GST_VIDEO_FRAME_COMP_STRIDE (&fdata->vframe, i);
  }

  pic_in.sliceType = X265_TYPE_AUTO;
  pic_in.pts = frame->pts;
  pic_in.dts = frame->dts;
  pic_in.bitDepth = info->finfo->depth[0];
  pic_in.userData = GINT_TO_POINTER (frame->system_frame_number);

  ret = gst_x265_enc_encode_frame (encoder, &pic_in, frame, &i_nal, TRUE);

  /* input buffer is released later on */
  return ret;

/* ERRORS */
not_inited:
  {
    GST_WARNING_OBJECT (encoder, "Got buffer before set_caps was called");
    return GST_FLOW_NOT_NEGOTIATED;
  }
invalid_frame:
  {
    GST_ERROR_OBJECT (encoder, "Failed to map frame");
    return GST_FLOW_ERROR;
  }
}
Code Example #11
File: gstjpegdec.c Project: an146/gst-plugins-good
static void
gst_jpeg_dec_decode_rgb (GstJpegDec * dec, GstVideoFrame * frame)
{
  guchar *r_rows[16], *g_rows[16], *b_rows[16];
  guchar **scanarray[3] = { r_rows, g_rows, b_rows };
  gint i, j, k;
  gint lines;
  guint8 *base[3];
  guint pstride, rstride;
  gint width, height;

  GST_DEBUG_OBJECT (dec, "indirect decoding of RGB");

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width))))
    return;

  for (i = 0; i < 3; i++)
    base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i);

  pstride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
  rstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);

  memcpy (r_rows, dec->idr_y, 16 * sizeof (gpointer));
  memcpy (g_rows, dec->idr_u, 16 * sizeof (gpointer));
  memcpy (b_rows, dec->idr_v, 16 * sizeof (gpointer));

  i = 0;
  while (i < height) {
    lines = jpeg_read_raw_data (&dec->cinfo, scanarray, DCTSIZE);
    if (G_LIKELY (lines > 0)) {
      for (j = 0; (j < DCTSIZE) && (i < height); j++, i++) {
        gint p;

        p = 0;
        for (k = 0; k < width; k++) {
          base[0][p] = r_rows[j][k];
          base[1][p] = g_rows[j][k];
          base[2][p] = b_rows[j][k];
          p += pstride;
        }
        base[0] += rstride;
        base[1] += rstride;
        base[2] += rstride;
      }
    } else {
      GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0");
    }
  }
}
Code Example #12
/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
daala_handle_image (GstDaalaDec * dec, od_img * img, GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (dec);
  gint width, height, stride;
  GstFlowReturn result;
  gint i, comp;
  guint8 *dest, *src;
  GstVideoFrame vframe;

  result = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  /* if only libdaala would allow us to give it a destination frame */
  GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, dec,
      "doing unavoidable video frame copy");

  if (G_UNLIKELY (!gst_video_frame_map (&vframe, &dec->output_state->info,
              frame->output_buffer, GST_MAP_WRITE)))
    goto invalid_frame;

  for (comp = 0; comp < 3; comp++) {
    width = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, comp);
    height = GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, comp);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, comp);
    dest = GST_VIDEO_FRAME_COMP_DATA (&vframe, comp);

    src = img->planes[comp].data;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += img->planes[comp].ystride;
    }
  }
  gst_video_frame_unmap (&vframe);

  return GST_FLOW_OK;
invalid_frame:
  {
    GST_DEBUG_OBJECT (dec, "could not map video frame");
    return GST_FLOW_ERROR;
  }
}
Code Example #13
static void
gst_vp9_dec_image_to_buffer (GstVP9Dec * dec, const vpx_image_t * img,
    GstBuffer * buffer)
{
  int deststride, srcstride, height, width, line, comp;
  guint8 *dest, *src;
  GstVideoFrame frame;
  GstVideoInfo *info = &dec->output_state->info;

  if (!gst_video_frame_map (&frame, info, buffer, GST_MAP_WRITE)) {
    GST_ERROR_OBJECT (dec, "Could not map video buffer");
    return;
  }

  for (comp = 0; comp < 3; comp++) {
    dest = GST_VIDEO_FRAME_COMP_DATA (&frame, comp);
    src = img->planes[comp];
    width = GST_VIDEO_FRAME_COMP_WIDTH (&frame, comp)
        * GST_VIDEO_FRAME_COMP_PSTRIDE (&frame, comp);
    height = GST_VIDEO_FRAME_COMP_HEIGHT (&frame, comp);
    deststride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, comp);
    srcstride = img->stride[comp];

    if (srcstride == deststride) {
      GST_TRACE_OBJECT (dec, "Stride matches. Comp %d: %d, copying full plane",
          comp, srcstride);
      memcpy (dest, src, srcstride * height);
    } else {
      GST_TRACE_OBJECT (dec, "Stride mismatch. Comp %d: %d != %d, copying "
          "line by line.", comp, srcstride, deststride);
      for (line = 0; line < height; line++) {
        memcpy (dest, src, width);
        dest += deststride;
        src += srcstride;
      }
    }
  }

  gst_video_frame_unmap (&frame);
}
Code Example #14
File: gstgamma.c Project: adesurya/gst-mobile
static void
gst_gamma_planar_yuv_ip (GstGamma * gamma, GstVideoFrame * frame)
{
  gint i, j, height;
  gint width, stride, row_wrap;
  const guint8 *table = gamma->gamma_table;
  guint8 *data;

  data = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  width = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
  height = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);
  row_wrap = stride - width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      *data = table[*data];
      data++;
    }
    data += row_wrap;
  }
}
Code Example #15
static void
sink_handoff_cb_I420 (GstElement * object, GstBuffer * buffer, GstPad * pad,
    gpointer user_data)
{
  guint *sink_pos = (guint *) user_data;
  gboolean contains_text = (*sink_pos == 1 || *sink_pos == 2);
  guint c, i, j;
  gboolean all_red = TRUE;
  guint8 *comp;
  gint comp_stride, comp_width, comp_height;
  const guint8 color[] = { 81, 90, 240 };
  GstVideoInfo info;
  GstVideoFrame frame;

  gst_video_info_init (&info);
  gst_video_info_set_format (&info, GST_VIDEO_FORMAT_I420, 640, 480);

  gst_video_frame_map (&frame, &info, buffer, GST_MAP_READ);

  for (c = 0; c < 3; c++) {
    comp = GST_VIDEO_FRAME_COMP_DATA (&frame, c);
    comp_stride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, c);
    comp_width = GST_VIDEO_FRAME_COMP_WIDTH (&frame, c);
    comp_height = GST_VIDEO_FRAME_COMP_HEIGHT (&frame, c);

    for (i = 0; i < comp_height; i++) {
      for (j = 0; j < comp_width; j++) {
        all_red = all_red && (comp[i * comp_stride + j] == color[c]);
      }
    }
  }
  gst_video_frame_unmap (&frame);

  fail_unless (contains_text != all_red,
      "Frame %d is incorrect (all red %d, contains text %d)", *sink_pos,
      all_red, contains_text);
  *sink_pos = *sink_pos + 1;
}
Code Example #16
static GstFlowReturn
gst_smooth_transform_frame (GstVideoFilter * vfilter, GstVideoFrame * in_frame,
    GstVideoFrame * out_frame)
{
  GstSmooth *smooth;

  smooth = GST_SMOOTH (vfilter);

  if (!smooth->active) {
    gst_video_frame_copy (out_frame, in_frame);
    return GST_FLOW_OK;
  }

  smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 0),
      GST_VIDEO_FRAME_COMP_DATA (in_frame, 0),
      GST_VIDEO_FRAME_COMP_WIDTH (in_frame, 0),
      GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, 0),
      GST_VIDEO_FRAME_COMP_STRIDE (in_frame, 0),
      GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 0),
      smooth->tolerance, smooth->filtersize);
  if (!smooth->luma_only) {
    smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 1),
        GST_VIDEO_FRAME_COMP_DATA (in_frame, 1),
        GST_VIDEO_FRAME_COMP_WIDTH (in_frame, 1),
        GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, 1),
        GST_VIDEO_FRAME_COMP_STRIDE (in_frame, 1),
        GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 1),
        smooth->tolerance, smooth->filtersize);
    smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 2),
        GST_VIDEO_FRAME_COMP_DATA (in_frame, 2),
        GST_VIDEO_FRAME_COMP_WIDTH (in_frame, 2),
        GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, 2),
        GST_VIDEO_FRAME_COMP_STRIDE (in_frame, 2),
        GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 2),
        smooth->tolerance, smooth->filtersize);
  } else {
    gst_video_frame_copy_plane (out_frame, in_frame, 1);
    gst_video_frame_copy_plane (out_frame, in_frame, 2);
  }

  return GST_FLOW_OK;
}
Code Example #17
/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (dec);
  gint width, height, stride;
  GstFlowReturn result;
  gint i, comp;
  guint8 *dest, *src;
  GstVideoFrame vframe;
  gint pic_width, pic_height;
  gint offset_x, offset_y;

  result = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  if (!dec->can_crop) {
    /* we need to crop the hard way */
    offset_x = dec->info.pic_x;
    offset_y = dec->info.pic_y;
    pic_width = dec->info.pic_width;
    pic_height = dec->info.pic_height;
    /* Ensure correct offsets in chroma for formats that need it
     * by rounding the offset. libtheora will add proper pixels,
     * so no need to handle them ourselves. */
    if (offset_x & 1 && dec->info.pixel_fmt != TH_PF_444)
      offset_x--;
    if (offset_y & 1 && dec->info.pixel_fmt == TH_PF_420)
      offset_y--;
  } else {
    /* copy the whole frame */
    offset_x = 0;
    offset_y = 0;
    pic_width = dec->info.frame_width;
    pic_height = dec->info.frame_height;

    if (dec->info.pic_width != dec->info.frame_width ||
        dec->info.pic_height != dec->info.frame_height ||
        dec->info.pic_x != 0 || dec->info.pic_y != 0) {
      GstVideoMeta *vmeta;
      GstVideoCropMeta *cmeta;

      vmeta = gst_buffer_get_video_meta (frame->output_buffer);
      /* If the buffer pool didn't add the meta already
       * we add it ourselves here */
      if (!vmeta)
        vmeta = gst_buffer_add_video_meta (frame->output_buffer,
            GST_VIDEO_FRAME_FLAG_NONE,
            dec->output_state->info.finfo->format,
            dec->info.frame_width, dec->info.frame_height);

      /* Just to be sure that the buffer pool doesn't do something
       * completely weird and we would crash later
       */
      g_assert (vmeta->format == dec->output_state->info.finfo->format);
      g_assert (vmeta->width == dec->info.frame_width);
      g_assert (vmeta->height == dec->info.frame_height);

      cmeta = gst_buffer_add_video_crop_meta (frame->output_buffer);

      /* we can do things slightly more efficient when we know that
       * downstream understands clipping */
      cmeta->x = dec->info.pic_x;
      cmeta->y = dec->info.pic_y;
      cmeta->width = dec->info.pic_width;
      cmeta->height = dec->info.pic_height;
    }
  }

  /* if only libtheora would allow us to give it a destination frame */
  GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, dec,
      "doing unavoidable video frame copy");

  if (G_UNLIKELY (!gst_video_frame_map (&vframe, &dec->output_state->info,
              frame->output_buffer, GST_MAP_WRITE)))
    goto invalid_frame;

  for (comp = 0; comp < 3; comp++) {
    width =
        GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (vframe.info.finfo, comp, pic_width);
    height =
        GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (vframe.info.finfo, comp,
        pic_height);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, comp);
    dest = GST_VIDEO_FRAME_COMP_DATA (&vframe, comp);

    src = buf[comp].data;
    src += ((height == pic_height) ? offset_y : offset_y / 2)
        * buf[comp].stride;
    src += (width == pic_width) ? offset_x : offset_x / 2;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += buf[comp].stride;
    }
  }
  gst_video_frame_unmap (&vframe);

  return GST_FLOW_OK;
invalid_frame:
  {
    GST_DEBUG_OBJECT (dec, "could not map video frame");
    return GST_FLOW_ERROR;
  }
}
Code Example #18
static void
gst_video_detect_yuv (GstSimpleVideoMarkDetect * simplevideomarkdetect,
    GstVideoFrame * frame)
{
  gdouble brightness;
  gint i, pw, ph, row_stride, pixel_stride;
  gint width, height, offset_calc, x, y;
  guint8 *d;
  guint64 pattern_data;
  gint total_pattern;

  width = frame->info.width;
  height = frame->info.height;

  pw = simplevideomarkdetect->pattern_width;
  ph = simplevideomarkdetect->pattern_height;
  row_stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);

  d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  /* move to start of bottom left, adjust for offsets */
  offset_calc =
      row_stride * (height - ph - simplevideomarkdetect->bottom_offset) +
      pixel_stride * simplevideomarkdetect->left_offset;
  x = simplevideomarkdetect->left_offset;
  y = height - ph - simplevideomarkdetect->bottom_offset;

  total_pattern =
      simplevideomarkdetect->pattern_count +
      simplevideomarkdetect->pattern_data_count;
  /* If x and y offset values are outside the video, no need to analyze */
  if ((x + (pw * total_pattern)) < 0 || x > width || (y + height) < 0
      || y > height) {
    GST_ERROR_OBJECT (simplevideomarkdetect,
        "simplevideomarkdetect pattern is outside the video. Not Analyzing.");
    return;
  }

  /* Offset calculation less than 0, then reset to 0 */
  if (offset_calc < 0)
    offset_calc = 0;
  /* Y position of mark is negative or pattern exceeds the video height,
     then recalculate pattern height for partial display */
  if (y < 0)
    ph += y;
  else if ((y + ph) > height)
    ph = height - y;
  /* If pattern height is less than 0, need not analyze anything */
  if (ph < 0)
    return;

  /* move to start of bottom left */
  d += offset_calc;

  /* analyze the bottom left pixels */
  for (i = 0; i < simplevideomarkdetect->pattern_count; i++) {
    gint draw_pw;
    /* calc brightness of width * height box */
    brightness =
        gst_video_detect_calc_brightness (simplevideomarkdetect, d, pw, ph,
        row_stride, pixel_stride);

    GST_DEBUG_OBJECT (simplevideomarkdetect, "brightness %f", brightness);

    if (i & 1) {
      /* odd pixels must be white, all pixels darker than the center +
       * sensitivity are considered wrong. */
      if (brightness <
          (simplevideomarkdetect->pattern_center +
              simplevideomarkdetect->pattern_sensitivity))
        goto no_pattern;
    } else {
      /* even pixels must be black, pixels lighter than the center - sensitivity
       * are considered wrong. */
      if (brightness >
          (simplevideomarkdetect->pattern_center -
              simplevideomarkdetect->pattern_sensitivity))
        goto no_pattern;
    }

    /* X position of mark is negative or pattern exceeds the video width,
       then recalculate pattern width for partial display */
    draw_pw = calculate_pw (pw, x, width);
    /* If pattern width is less than 0, continue with the next pattern */
    if (draw_pw < 0)
      continue;

    /* move to i-th pattern */
    d += pixel_stride * draw_pw;
    x += draw_pw;

    if ((x + (pw * (total_pattern - i - 1))) < 0 || x >= width)
      break;
  }
  GST_DEBUG_OBJECT (simplevideomarkdetect, "found pattern");

  pattern_data = 0;

  /* get the data of the pattern */
  for (i = 0; i < simplevideomarkdetect->pattern_data_count; i++) {
    gint draw_pw;
    /* calc brightness of width * height box */
    brightness =
        gst_video_detect_calc_brightness (simplevideomarkdetect, d, pw, ph,
        row_stride, pixel_stride);
    /* update pattern, we just use the center to decide between black and white. */
    pattern_data <<= 1;
    if (brightness > simplevideomarkdetect->pattern_center)
      pattern_data |= 1;

    /* X position of mark is negative or pattern exceeds the video width,
       then recalculate pattern width for partial display */
    draw_pw = calculate_pw (pw, x, width);
    /* If pattern width is less than 0, continue with the next pattern */
    if (draw_pw < 0)
      continue;

    /* move to i-th pattern data */
    d += pixel_stride * draw_pw;
    x += draw_pw;

    if ((x + (pw * (simplevideomarkdetect->pattern_data_count - i - 1))) < 0
        || x >= width)
      break;
  }

  GST_DEBUG_OBJECT (simplevideomarkdetect, "have data %" G_GUINT64_FORMAT,
      pattern_data);

  simplevideomarkdetect->in_pattern = TRUE;
  gst_video_detect_post_message (simplevideomarkdetect, frame->buffer,
      pattern_data);

  return;

no_pattern:
  {
    GST_DEBUG_OBJECT (simplevideomarkdetect, "no pattern found");
    if (simplevideomarkdetect->in_pattern) {
      simplevideomarkdetect->in_pattern = FALSE;
      gst_video_detect_post_message (simplevideomarkdetect, frame->buffer, 0);
    }
    return;
  }
}
Code Example #19
File: gstspu-pgs.c Project: drothlis/gst-plugins-bad
static void
pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state,
    GstVideoFrame * frame)
{
  SpuColour *colour;
  guint8 *planes[3];            /* YUV frame pointers */
  gint strides[3];
  guint8 *data, *end;
  guint16 obj_w;
  guint16 obj_h G_GNUC_UNUSED;
  guint x, y, i, min_x, max_x;

  if (G_UNLIKELY (obj->rle_data == NULL || obj->rle_data_size == 0
          || obj->rle_data_used != obj->rle_data_size))
    return;

  data = obj->rle_data;
  end = data + obj->rle_data_used;

  if (data + 4 > end)
    return;

  /* FIXME: Calculate and use the cropping window for the output, as the
   * intersection of the crop rectangle for this object (if any) and the
   * window specified by the object's window_id */

  /* Store the start of each plane */
  planes[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  planes[1] = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  planes[2] = GST_VIDEO_FRAME_COMP_DATA (frame, 2);

  strides[0] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  strides[1] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
  strides[2] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);

  y = MIN (obj->y, state->info.height);

  planes[0] += strides[0] * y;
  planes[1] += strides[1] * (y / 2);
  planes[2] += strides[2] * (y / 2);

  /* RLE data: */
  obj_w = GST_READ_UINT16_BE (data);
  obj_h = GST_READ_UINT16_BE (data + 2);
  data += 4;

  min_x = MIN (obj->x, strides[0]);
  max_x = MIN (obj->x + obj_w, strides[0]);

  state->comp_left = x = min_x;
  state->comp_right = max_x;

  gstspu_clear_comp_buffers (state);

  while (data < end) {
    guint8 pal_id;
    guint16 run_len;

    pal_id = *data++;
    if (pal_id != 0) {
      run_len = 1;
    } else {
      if (data + 1 > end)
        return;
      switch (data[0] & 0xC0) {
        case 0x00:
          run_len = (data[0] & 0x3f);
          data++;
          break;
        case 0x40:
          if (data + 2 > end)
            return;
          run_len = ((data[0] << 8) | data[1]) & 0x3fff;
          data += 2;
          break;
        case 0x80:
          if (data + 2 > end)
            return;
          run_len = (data[0] & 0x3f);
          pal_id = data[1];
          data += 2;
          break;
        case 0xC0:
          if (data + 3 > end)
            return;
          run_len = ((data[0] << 8) | data[1]) & 0x3fff;
          pal_id = data[2];
          data += 3;
          break;
        default:
          run_len = 0;
          break;
      }
    }

    colour = &state->pgs.palette[pal_id];
    if (colour->A) {
      guint32 inv_A = 0xff - colour->A;
      if (G_UNLIKELY (x + run_len > max_x))
        run_len = (max_x - x);

      for (i = 0; i < run_len; i++) {
        planes[0][x] = (inv_A * planes[0][x] + colour->Y) / 0xff;

        state->comp_bufs[0][x / 2] += colour->U;
        state->comp_bufs[1][x / 2] += colour->V;
        state->comp_bufs[2][x / 2] += colour->A;
        x++;
      }
    } else {
      x += run_len;
    }

    if (!run_len || x > max_x) {
      x = min_x;
      planes[0] += strides[0];

      if (y % 2) {
        gstspu_blend_comp_buffers (state, planes);
        gstspu_clear_comp_buffers (state);

        planes[1] += strides[1];
        planes[2] += strides[2];
      }
      y++;
      if (y >= state->info.height)
        return;                 /* Hit the bottom */
    }
  }

  if (y % 2)
    gstspu_blend_comp_buffers (state, planes);
}
Code Example #20
File: greedyh.c Project: collects/gst-plugins-good
static void
deinterlace_frame_di_greedyh_packed (GstDeinterlaceMethod * method,
    const GstDeinterlaceField * history, guint history_count,
    GstVideoFrame * outframe, int cur_field_idx)
{
  GstDeinterlaceMethodGreedyH *self = GST_DEINTERLACE_METHOD_GREEDY_H (method);
  GstDeinterlaceMethodGreedyHClass *klass =
      GST_DEINTERLACE_METHOD_GREEDY_H_GET_CLASS (self);
  gint InfoIsOdd = 0;
  gint Line;
  gint RowStride = GST_VIDEO_FRAME_COMP_STRIDE (outframe, 0);
  gint FieldHeight = GST_VIDEO_INFO_HEIGHT (method->vinfo) / 2;
  gint Pitch = RowStride * 2;
  const guint8 *L1;             // ptr to Line1, of 3
  const guint8 *L2;             // ptr to Line2, the weave line
  const guint8 *L3;             // ptr to Line3
  const guint8 *L2P;            // ptr to prev Line2
  guint8 *Dest = GST_VIDEO_FRAME_COMP_DATA (outframe, 0);
  ScanlineFunction scanline;

  if (cur_field_idx + 2 > history_count || cur_field_idx < 1) {
    GstDeinterlaceMethod *backup_method;

    backup_method = g_object_new (gst_deinterlace_method_linear_get_type (),
        NULL);

    gst_deinterlace_method_setup (backup_method, method->vinfo);
    gst_deinterlace_method_deinterlace_frame (backup_method,
        history, history_count, outframe, cur_field_idx);

    g_object_unref (backup_method);
    return;
  }

  cur_field_idx += 2;

  switch (GST_VIDEO_INFO_FORMAT (method->vinfo)) {
    case GST_VIDEO_FORMAT_YUY2:
    case GST_VIDEO_FORMAT_YVYU:
      scanline = klass->scanline_yuy2;
      break;
    case GST_VIDEO_FORMAT_UYVY:
      scanline = klass->scanline_uyvy;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      scanline = klass->scanline_ayuv;
      break;
    default:
      g_assert_not_reached ();
      return;
  }

  // copy first even line no matter what, and the first odd line if we're
  // processing an EVEN field. (note diff from other deint rtns.)

  if (history[cur_field_idx - 1].flags == PICTURE_INTERLACED_BOTTOM) {
    InfoIsOdd = 1;

    L1 = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 2].frame, 0);
    if (history[cur_field_idx - 2].flags & PICTURE_INTERLACED_BOTTOM)
      L1 += RowStride;

    L2 = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 1].frame, 0);
    if (history[cur_field_idx - 1].flags & PICTURE_INTERLACED_BOTTOM)
      L2 += RowStride;

    L3 = L1 + Pitch;
    L2P = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 3].frame, 0);
    if (history[cur_field_idx - 3].flags & PICTURE_INTERLACED_BOTTOM)
      L2P += RowStride;

    // copy first even line
    memcpy (Dest, L1, RowStride);
    Dest += RowStride;
  } else {
    InfoIsOdd = 0;
    L1 = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 2].frame, 0);
    if (history[cur_field_idx - 2].flags & PICTURE_INTERLACED_BOTTOM)
      L1 += RowStride;

    L2 = (guint8 *) GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx -
            1].frame, 0) + Pitch;
    if (history[cur_field_idx - 1].flags & PICTURE_INTERLACED_BOTTOM)
      L2 += RowStride;

    L3 = L1 + Pitch;
    L2P =
        (guint8 *) GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 3].frame,
        0) + Pitch;
    if (history[cur_field_idx - 3].flags & PICTURE_INTERLACED_BOTTOM)
      L2P += RowStride;

    // copy first even line
    memcpy (Dest, L1, RowStride);
    Dest += RowStride;
    // then first odd line
    memcpy (Dest, L1, RowStride);
    Dest += RowStride;
  }

  for (Line = 0; Line < (FieldHeight - 1); ++Line) {
    scanline (self, L1, L2, L3, L2P, Dest, RowStride);
    Dest += RowStride;
    memcpy (Dest, L3, RowStride);
    Dest += RowStride;

    L1 += Pitch;
    L2 += Pitch;
    L3 += Pitch;
    L2P += Pitch;
  }

  if (InfoIsOdd) {
    memcpy (Dest, L2, RowStride);
  }
}
Code Example #21
static GstFlowReturn
gst_rtp_vraw_pay_handle_buffer (GstRTPBasePayload * payload, GstBuffer * buffer)
{
  GstRtpVRawPay *rtpvrawpay;
  GstFlowReturn ret = GST_FLOW_OK;
  gfloat packets_per_packline;
  guint pgroups_per_packet;
  guint packlines_per_list, buffers_per_list;
  guint lines_delay;            /* after how many packed lines we push out a buffer list */
  guint last_line;              /* last pack line number we pushed out a buffer list     */
  guint line, offset;
  guint8 *p0, *yp, *up, *vp;
  guint ystride, uvstride;
  guint xinc, yinc;
  guint pgroup;
  guint mtu;
  guint width, height;
  gint field, fields;
  GstVideoFormat format;
  GstVideoFrame frame;
  gint interlaced;
  gboolean use_buffer_lists;
  GstBufferList *list = NULL;
  GstRTPBuffer rtp = { NULL, };

  rtpvrawpay = GST_RTP_VRAW_PAY (payload);

  gst_video_frame_map (&frame, &rtpvrawpay->vinfo, buffer, GST_MAP_READ);

  GST_LOG_OBJECT (rtpvrawpay, "new frame of %" G_GSIZE_FORMAT " bytes",
      gst_buffer_get_size (buffer));

  /* get pointer and strides of the planes */
  p0 = GST_VIDEO_FRAME_PLANE_DATA (&frame, 0);
  yp = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  up = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
  vp = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

  ystride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
  uvstride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);

  mtu = GST_RTP_BASE_PAYLOAD_MTU (payload);

  /* amount of bytes for one pixel */
  pgroup = rtpvrawpay->pgroup;
  width = GST_VIDEO_INFO_WIDTH (&rtpvrawpay->vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&rtpvrawpay->vinfo);

  interlaced = GST_VIDEO_INFO_IS_INTERLACED (&rtpvrawpay->vinfo);

  format = GST_VIDEO_INFO_FORMAT (&rtpvrawpay->vinfo);

  yinc = rtpvrawpay->yinc;
  xinc = rtpvrawpay->xinc;

  /* after how many packed lines we push out a buffer list */
  lines_delay = GST_ROUND_UP_4 (height / rtpvrawpay->chunks_per_frame);

  /* calculate how many buffers we expect to store in a single buffer list */
  pgroups_per_packet = (mtu - (12 + 14)) / pgroup;
  packets_per_packline = width / (xinc * pgroups_per_packet * 1.0);
  packlines_per_list = height / (yinc * rtpvrawpay->chunks_per_frame);
  buffers_per_list = packlines_per_list * packets_per_packline;
  buffers_per_list = GST_ROUND_UP_8 (buffers_per_list);

  use_buffer_lists = (rtpvrawpay->chunks_per_frame < (height / yinc));

  fields = 1 + interlaced;

  /* start with line 0, offset 0 */
  for (field = 0; field < fields; field++) {
    line = field;
    offset = 0;
    last_line = 0;

    if (use_buffer_lists)
      list = gst_buffer_list_new_sized (buffers_per_list);

    /* write all lines */
    while (line < height) {
      guint left, pack_line;
      GstBuffer *out;
      guint8 *outdata, *headers;
      gboolean next_line, complete = FALSE;
      guint length, cont, pixels;

      /* get the max allowed payload length size, we try to fill the complete MTU */
      left = gst_rtp_buffer_calc_payload_len (mtu, 0, 0);
      out = gst_rtp_buffer_new_allocate (left, 0, 0);

      if (field == 0) {
        GST_BUFFER_PTS (out) = GST_BUFFER_PTS (buffer);
      } else {
        GST_BUFFER_PTS (out) = GST_BUFFER_PTS (buffer) +
            GST_BUFFER_DURATION (buffer) / 2;
      }

      gst_rtp_buffer_map (out, GST_MAP_WRITE, &rtp);
      outdata = gst_rtp_buffer_get_payload (&rtp);

      GST_LOG_OBJECT (rtpvrawpay, "created buffer of size %u for MTU %u", left,
          mtu);

      /*
       *   0                   1                   2                   3
       *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |   Extended Sequence Number    |            Length             |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |F|          Line No            |C|           Offset            |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |            Length             |F|          Line No            |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |C|           Offset            |                               .
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               .
       *  .                                                               .
       *  .                 Two (partial) lines of video data             .
       *  .                                                               .
       *  +---------------------------------------------------------------+
       */

      /* need 2 bytes for the extended sequence number */
      *outdata++ = 0;
      *outdata++ = 0;
      left -= 2;

      /* the headers start here */
      headers = outdata;

      /* make sure we can fit at least *one* header and pixel */
      if (!(left > (6 + pgroup))) {
        gst_rtp_buffer_unmap (&rtp);
        gst_buffer_unref (out);
        goto too_small;
      }

      /* while we can fit at least one header and one pixel */
      while (left > (6 + pgroup)) {
        /* we need a 6 bytes header */
        left -= 6;

        /* get how may bytes we need for the remaining pixels */
        pixels = width - offset;
        length = (pixels * pgroup) / xinc;

        if (left >= length) {
          /* pixels and header fit completely, we will write them and skip to the
           * next line. */
          next_line = TRUE;
        } else {
          /* line does not fit completely, see how many pixels fit */
          pixels = (left / pgroup) * xinc;
          length = (pixels * pgroup) / xinc;
          next_line = FALSE;
        }
        GST_LOG_OBJECT (rtpvrawpay, "filling %u bytes in %u pixels", length,
            pixels);
        left -= length;

        /* write length */
        *outdata++ = (length >> 8) & 0xff;
        *outdata++ = length & 0xff;

        /* write line no */
        *outdata++ = ((line >> 8) & 0x7f) | ((field << 7) & 0x80);
        *outdata++ = line & 0xff;

        if (next_line) {
          /* go to next line we do this here to make the check below easier */
          line += yinc;
        }

        /* calculate continuation marker */
        cont = (left > (6 + pgroup) && line < height) ? 0x80 : 0x00;

        /* write offset and continuation marker */
        *outdata++ = ((offset >> 8) & 0x7f) | cont;
        *outdata++ = offset & 0xff;

        if (next_line) {
          /* reset offset */
          offset = 0;
          GST_LOG_OBJECT (rtpvrawpay, "go to next line %u", line);
        } else {
          offset += pixels;
          GST_LOG_OBJECT (rtpvrawpay, "next offset %u", offset);
        }

        if (!cont)
          break;
      }
      GST_LOG_OBJECT (rtpvrawpay, "consumed %u bytes",
          (guint) (outdata - headers));

      /* second pass, read headers and write the data */
      while (TRUE) {
        guint offs, lin;

        /* read length and cont */
        length = (headers[0] << 8) | headers[1];
        lin = ((headers[2] & 0x7f) << 8) | headers[3];
        offs = ((headers[4] & 0x7f) << 8) | headers[5];
        cont = headers[4] & 0x80;
        pixels = length / pgroup;
        headers += 6;

        GST_LOG_OBJECT (payload,
            "writing length %u, line %u, offset %u, cont %d", length, lin, offs,
            cont);

        switch (format) {
          case GST_VIDEO_FORMAT_RGB:
          case GST_VIDEO_FORMAT_RGBA:
          case GST_VIDEO_FORMAT_BGR:
          case GST_VIDEO_FORMAT_BGRA:
          case GST_VIDEO_FORMAT_UYVY:
          case GST_VIDEO_FORMAT_UYVP:
            offs /= xinc;
            memcpy (outdata, p0 + (lin * ystride) + (offs * pgroup), length);
            outdata += length;
            break;
          case GST_VIDEO_FORMAT_AYUV:
          {
            gint i;
            guint8 *datap;

            datap = p0 + (lin * ystride) + (offs * 4);

            for (i = 0; i < pixels; i++) {
              *outdata++ = datap[2];
              *outdata++ = datap[1];
              *outdata++ = datap[3];
              datap += 4;
            }
            break;
          }
          case GST_VIDEO_FORMAT_I420:
          {
            gint i;
            guint uvoff;
            guint8 *yd1p, *yd2p, *udp, *vdp;

            yd1p = yp + (lin * ystride) + (offs);
            yd2p = yd1p + ystride;
            uvoff = (lin / yinc * uvstride) + (offs / xinc);
            udp = up + uvoff;
            vdp = vp + uvoff;

            for (i = 0; i < pixels; i++) {
              *outdata++ = *yd1p++;
              *outdata++ = *yd1p++;
              *outdata++ = *yd2p++;
              *outdata++ = *yd2p++;
              *outdata++ = *udp++;
              *outdata++ = *vdp++;
            }
            break;
          }
          case GST_VIDEO_FORMAT_Y41B:
          {
            gint i;
            guint uvoff;
            guint8 *ydp, *udp, *vdp;

            ydp = yp + (lin * ystride) + offs;
            uvoff = (lin / yinc * uvstride) + (offs / xinc);
            udp = up + uvoff;
            vdp = vp + uvoff;

            for (i = 0; i < pixels; i++) {
              *outdata++ = *udp++;
              *outdata++ = *ydp++;
              *outdata++ = *ydp++;
              *outdata++ = *vdp++;
              *outdata++ = *ydp++;
              *outdata++ = *ydp++;
            }
            break;
          }
          default:
            gst_rtp_buffer_unmap (&rtp);
            gst_buffer_unref (out);
            goto unknown_sampling;
        }

        if (!cont)
          break;
      }

      if (line >= height) {
        GST_LOG_OBJECT (rtpvrawpay, "field/frame complete, set marker");
        gst_rtp_buffer_set_marker (&rtp, TRUE);
        complete = TRUE;
      }
      gst_rtp_buffer_unmap (&rtp);
      if (left > 0) {
        GST_LOG_OBJECT (rtpvrawpay, "we have %u bytes left", left);
        gst_buffer_resize (out, 0, gst_buffer_get_size (out) - left);
      }

      /* Now either push out the buffer directly */
      if (!use_buffer_lists) {
        ret = gst_rtp_base_payload_push (payload, out);
        continue;
      }

      /* or add the buffer to buffer list ... */
      gst_buffer_list_add (list, out);

      /* .. and check if we need to push out the list */
      pack_line = (line - field) / fields;
      if (complete || (pack_line > last_line && pack_line % lines_delay == 0)) {
        GST_LOG_OBJECT (rtpvrawpay, "pushing list of %u buffers up to pack "
            "line %u", gst_buffer_list_length (list), pack_line);
        ret = gst_rtp_base_payload_push_list (payload, list);
        list = NULL;
        if (!complete)
          list = gst_buffer_list_new_sized (buffers_per_list);
        last_line = pack_line;
      }
    }

  }

  gst_video_frame_unmap (&frame);
  gst_buffer_unref (buffer);

  return ret;

  /* ERRORS */
unknown_sampling:
  {
    GST_ELEMENT_ERROR (payload, STREAM, FORMAT,
        (NULL), ("unimplemented sampling"));
    gst_video_frame_unmap (&frame);
    gst_buffer_unref (buffer);
    return GST_FLOW_NOT_SUPPORTED;
  }
too_small:
  {
    GST_ELEMENT_ERROR (payload, RESOURCE, NO_SPACE_LEFT,
        (NULL), ("not enough space to send at least one pixel"));
    gst_video_frame_unmap (&frame);
    gst_buffer_unref (buffer);
    return GST_FLOW_NOT_SUPPORTED;
  }
}
Code Example #22
File: libde265-dec.c Project: 0p1pp1/gst-plugins-bad
static GstFlowReturn
_gst_libde265_return_image (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, const struct de265_image *img)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (decoder);
  struct GstLibde265FrameRef *ref;
  GstFlowReturn result;
  GstVideoFrame outframe;
  GstVideoCodecFrame *out_frame;
  int frame_number;
  int plane;

  ref = (struct GstLibde265FrameRef *) de265_get_image_plane_user_data (img, 0);
  if (ref != NULL) {
    /* decoder is using direct rendering */
    out_frame = gst_video_codec_frame_ref (ref->frame);
    if (frame != NULL) {
      gst_video_codec_frame_unref (frame);
    }
    gst_buffer_replace (&out_frame->output_buffer, ref->buffer);
    gst_buffer_replace (&ref->buffer, NULL);
    return gst_video_decoder_finish_frame (decoder, out_frame);
  }

  result =
      _gst_libde265_image_available (decoder, de265_get_image_width (img, 0),
      de265_get_image_height (img, 0));
  if (result != GST_FLOW_OK) {
    GST_ERROR_OBJECT (dec, "Failed to notify about available image");
    return result;
  }

  frame_number = (uintptr_t) de265_get_image_user_data (img) - 1;
  if (frame_number != -1) {
    out_frame = gst_video_decoder_get_frame (decoder, frame_number);
  } else {
    out_frame = NULL;
  }
  if (frame != NULL) {
    gst_video_codec_frame_unref (frame);
  }

  if (out_frame == NULL) {
    GST_ERROR_OBJECT (dec, "No frame available to return");
    return GST_FLOW_ERROR;
  }

  result = gst_video_decoder_allocate_output_frame (decoder, out_frame);
  if (result != GST_FLOW_OK) {
    GST_ERROR_OBJECT (dec, "Failed to allocate output frame");
    return result;
  }

  g_assert (dec->output_state != NULL);
  if (!gst_video_frame_map (&outframe, &dec->output_state->info,
          out_frame->output_buffer, GST_MAP_WRITE)) {
    GST_ERROR_OBJECT (dec, "Failed to map output buffer");
    return GST_FLOW_ERROR;
  }

  for (plane = 0; plane < 3; plane++) {
    int width = de265_get_image_width (img, plane);
    int height = de265_get_image_height (img, plane);
    int srcstride = width;
    int dststride = GST_VIDEO_FRAME_COMP_STRIDE (&outframe, plane);
    const uint8_t *src = de265_get_image_plane (img, plane, &srcstride);
    uint8_t *dest = GST_VIDEO_FRAME_COMP_DATA (&outframe, plane);
    if (srcstride == width && dststride == width) {
      memcpy (dest, src, height * width);
    } else {
      while (height--) {
        memcpy (dest, src, width);
        src += srcstride;
        dest += dststride;
      }
    }
  }
  gst_video_frame_unmap (&outframe);
  return gst_video_decoder_finish_frame (decoder, out_frame);
}
Code Example #23
static GstFlowReturn
gst_video_mark_yuv (GstSimpleVideoMark * simplevideomark, GstVideoFrame * frame)
{
  gint i, pw, ph, row_stride, pixel_stride;
  gint width, height, offset_calc, x, y;
  guint8 *d;
  guint64 pattern_shift;
  guint8 color;
  gint total_pattern;

  width = frame->info.width;
  height = frame->info.height;

  pw = simplevideomark->pattern_width;
  ph = simplevideomark->pattern_height;
  row_stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);

  d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  offset_calc =
      row_stride * (height - ph - simplevideomark->bottom_offset) +
      pixel_stride * simplevideomark->left_offset;
  x = simplevideomark->left_offset;
  y = height - ph - simplevideomark->bottom_offset;

  total_pattern =
      simplevideomark->pattern_count + simplevideomark->pattern_data_count;
  /* If x and y offset values are outside the video, no need to draw */
  if ((x + (pw * total_pattern)) < 0 || x > width || (y + height) < 0
      || y > height) {
    GST_ERROR_OBJECT (simplevideomark,
        "simplevideomark pattern is outside the video. Not drawing.");
    return GST_FLOW_OK;
  }

  /* Offset calculation less than 0, then reset to 0 */
  if (offset_calc < 0)
    offset_calc = 0;
  /* Y position of mark is negative or pattern exceeds the video height,
     then recalculate pattern height for partial display */
  if (y < 0)
    ph += y;
  else if ((y + ph) > height)
    ph = height - y;
  /* If pattern height is less than 0, need not draw anything */
  if (ph < 0)
    return GST_FLOW_OK;

  /* move to start of bottom left */
  d += offset_calc;

  /* draw the bottom left pixels */
  for (i = 0; i < simplevideomark->pattern_count; i++) {
    gint draw_pw;

    if (i & 1)
      /* odd pixels must be white */
      color = 255;
    else
      color = 0;

    /* X position of mark is negative or pattern exceeds the video width,
       then recalculate pattern width for partial display */
    draw_pw = calculate_pw (pw, x, width);
    /* If pattern width is less than 0, continue with the next pattern */
    if (draw_pw < 0)
      continue;

    /* draw box of width * height */
    gst_video_mark_draw_box (simplevideomark, d, draw_pw, ph, row_stride,
        pixel_stride, color);

    /* move to i-th pattern */
    d += pixel_stride * draw_pw;
    x += draw_pw;

    if ((x + (pw * (total_pattern - i - 1))) < 0 || x >= width)
      return GST_FLOW_OK;
  }

  pattern_shift =
      G_GUINT64_CONSTANT (1) << (simplevideomark->pattern_data_count - 1);

  /* get the data of the pattern */
  for (i = 0; i < simplevideomark->pattern_data_count; i++) {
    gint draw_pw;
    if (simplevideomark->pattern_data & pattern_shift)
      color = 255;
    else
      color = 0;

    /* X position of mark is negative or pattern exceeds the video width,
       then recalculate pattern width for partial display */
    draw_pw = calculate_pw (pw, x, width);
    /* If pattern width is less than 0, continue with the next pattern */
    if (draw_pw < 0)
      continue;

    gst_video_mark_draw_box (simplevideomark, d, draw_pw, ph, row_stride,
        pixel_stride, color);

    pattern_shift >>= 1;

    /* move to i-th pattern data */
    d += pixel_stride * draw_pw;
    x += draw_pw;

    if ((x + (pw * (simplevideomark->pattern_data_count - i - 1))) < 0
        || x >= width)
      return GST_FLOW_OK;
  }

  return GST_FLOW_OK;
}
Code Example #24
File: vtenc.c Project: asdlei00/gst-plugins-bad
static GstFlowReturn
gst_vtenc_encode_frame (GstVTEnc * self, GstBuffer * buf)
{
  GstVTApi *vt = self->ctx->vt;
  CMTime ts, duration;
  GstCoreMediaMeta *meta;
  CVPixelBufferRef pbuf = NULL;
  VTStatus vt_status;
  GstFlowReturn ret = GST_FLOW_OK;
  guint i;

  self->cur_inbuf = buf;

  ts = CMTimeMake (GST_TIME_AS_MSECONDS (GST_BUFFER_TIMESTAMP (buf)), 1000);
  duration = CMTimeMake
      (GST_TIME_AS_MSECONDS (GST_BUFFER_DURATION (buf)), 1000);

  meta = gst_buffer_get_core_media_meta (buf);
  if (meta != NULL) {
    pbuf = gst_core_media_buffer_get_pixel_buffer (buf);
  }

  if (pbuf == NULL) {
    GstVTEncFrame *frame;
    CVReturn cv_ret;

    frame = gst_vtenc_frame_new (buf, &self->video_info);
    if (!frame)
      goto cv_error;

    {
      const size_t num_planes = GST_VIDEO_FRAME_N_PLANES (&frame->videoframe);
      void *plane_base_addresses[GST_VIDEO_MAX_PLANES];
      size_t plane_widths[GST_VIDEO_MAX_PLANES];
      size_t plane_heights[GST_VIDEO_MAX_PLANES];
      size_t plane_bytes_per_row[GST_VIDEO_MAX_PLANES];
      OSType pixel_format_type;
      size_t i;

      for (i = 0; i < num_planes; i++) {
        plane_base_addresses[i] =
            GST_VIDEO_FRAME_PLANE_DATA (&frame->videoframe, i);
        plane_widths[i] = GST_VIDEO_FRAME_COMP_WIDTH (&frame->videoframe, i);
        plane_heights[i] = GST_VIDEO_FRAME_COMP_HEIGHT (&frame->videoframe, i);
        plane_bytes_per_row[i] =
            GST_VIDEO_FRAME_COMP_STRIDE (&frame->videoframe, i);
      }

      switch (GST_VIDEO_INFO_FORMAT (&self->video_info)) {
        case GST_VIDEO_FORMAT_I420:
          pixel_format_type = kCVPixelFormatType_420YpCbCr8Planar;
          break;
        case GST_VIDEO_FORMAT_NV12:
          pixel_format_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
          break;
        default:
          goto cv_error;
      }

      cv_ret = CVPixelBufferCreateWithPlanarBytes (NULL,
          self->negotiated_width, self->negotiated_height,
          pixel_format_type,
          frame,
          GST_VIDEO_FRAME_SIZE (&frame->videoframe),
          num_planes,
          plane_base_addresses,
          plane_widths,
          plane_heights,
          plane_bytes_per_row, gst_pixel_buffer_release_cb, frame, NULL, &pbuf);
      if (cv_ret != kCVReturnSuccess) {
        gst_vtenc_frame_free (frame);
        goto cv_error;
      }
    }
  }

  GST_OBJECT_LOCK (self);

  self->expect_keyframe = CFDictionaryContainsKey (self->options,
      *(vt->kVTEncodeFrameOptionKey_ForceKeyFrame));
  if (self->expect_keyframe)
    gst_vtenc_clear_cached_caps_downstream (self);

  vt_status = self->ctx->vt->VTCompressionSessionEncodeFrame (self->session,
      pbuf, ts, duration, self->options, NULL, NULL);

  if (vt_status != 0) {
    GST_WARNING_OBJECT (self, "VTCompressionSessionEncodeFrame returned %d",
        vt_status);
  }

  self->ctx->vt->VTCompressionSessionCompleteFrames (self->session,
      kCMTimeInvalid);

  GST_OBJECT_UNLOCK (self);

  CVPixelBufferRelease (pbuf);
  self->cur_inbuf = NULL;
  gst_buffer_unref (buf);

  if (self->cur_outbufs->len > 0) {
    meta =
        gst_buffer_get_core_media_meta (g_ptr_array_index (self->cur_outbufs,
            0));
    if (!gst_vtenc_negotiate_downstream (self, meta->sample_buf))
      ret = GST_FLOW_NOT_NEGOTIATED;
  }

  for (i = 0; i != self->cur_outbufs->len; i++) {
    GstBuffer *buf = g_ptr_array_index (self->cur_outbufs, i);

    if (ret == GST_FLOW_OK) {
      ret = gst_pad_push (self->srcpad, buf);
    } else {
      gst_buffer_unref (buf);
    }
  }
  g_ptr_array_set_size (self->cur_outbufs, 0);

  return ret;

cv_error:
  {
    self->cur_inbuf = NULL;
    gst_buffer_unref (buf);

    return GST_FLOW_ERROR;
  }
}
Code Example #25
static GstFlowReturn
gst_segmentation_transform_ip (GstVideoFilter * btrans, GstVideoFrame * frame)
{
  GstSegmentation *filter = GST_SEGMENTATION (btrans);
  int j;

  /*  get image data from the input, which is RGBA */
  filter->cvRGBA->imageData = (char *) GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  filter->cvRGBA->widthStep = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  filter->framecount++;

  /*  Image preprocessing: color space conversion etc */
  cvCvtColor (filter->cvRGBA, filter->cvRGB, CV_RGBA2RGB);
  cvCvtColor (filter->cvRGB, filter->cvYUV, CV_RGB2YCrCb);

  /* Create and update a fg/bg model using a codebook approach following the 
   * opencv O'Reilly book [1] implementation of the algo described in [2].
   *
   * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary 
   * Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
   * [2] "Real-time Foreground-Background Segmentation using Codebook Model", 
   * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
  if (METHOD_BOOK == filter->method) {
    unsigned cbBounds[3] = { 10, 5, 5 };
    int minMod[3] = { 20, 20, 20 };
    int maxMod[3] = { 20, 20, 20 };

    if (filter->framecount < 30) {
      /* Learning background phase: update_codebook on every frame */
      for (j = 0; j < filter->width * filter->height; j++) {
        update_codebook ((unsigned char *) filter->cvYUV->imageData + j * 3,
            (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
      }
    } else {
      /*  this updating is responsible for FG becoming BG again */
      if (filter->framecount % filter->learning_interval == 0) {
        for (j = 0; j < filter->width * filter->height; j++) {
          update_codebook ((uchar *) filter->cvYUV->imageData + j * 3,
              (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
        }
      }
      if (filter->framecount % 60 == 0) {
        for (j = 0; j < filter->width * filter->height; j++)
          clear_stale_entries ((codeBook *) & (filter->TcodeBook[j]));
      }

      for (j = 0; j < filter->width * filter->height; j++) {
        if (background_diff
            ((uchar *) filter->cvYUV->imageData + j * 3,
                (codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
          filter->cvFG->imageData[j] = 255;
        } else {
          filter->cvFG->imageData[j] = 0;
        }
      }
    }

    /* 3rd param is the smallest area to show: (w+h)/param , in pixels */
    find_connected_components (filter->cvFG, 1, 10000,
        filter->mem_storage, filter->contours);

  }
  /* Create the foreground and background masks using BackgroundSubtractorMOG [1], 
   *  Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV 
   * MOG implements the algorithm described in [2].
   * 
   * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
   * [2] P. KaewTraKulPong and R. Bowden, "An improved adaptive background 
   * mixture model for real-time tracking with shadow detection", Proc. 2nd 
   * European Workshop on Advanced Video-Based Surveillance Systems, 2001
   */
  else if (METHOD_MOG == filter->method) {
    run_mog_iteration (filter);
  }
  /* Create the foreground and background masks using BackgroundSubtractorMOG2
   * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm. 
   * OpenCV MOG2 implements the algorithm described in [2] and [3].
   * 
   * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
   * [2] Z. Zivkovic, "Improved adaptive Gaussian mixture model for background 
   * subtraction", International Conference on Pattern Recognition, UK, Aug 2004.
   * [3] Z. Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation 
   * per Image Pixel for the Task of Background Subtraction", Pattern 
   * Recognition Letters, vol. 27, no. 7, pages 773-780, 2006.   */
  else if (METHOD_MOG2 == filter->method) {
    run_mog2_iteration (filter);
  }

  /*  if test_mode is enabled, just overwrite the output with the fg mask */
  if (filter->test_mode) {
    cvCvtColor (filter->cvFG, filter->cvRGB, CV_GRAY2RGB);

    cvSplit (filter->cvRGB, filter->ch1, filter->ch2, filter->ch3, NULL);
  } else
    cvSplit (filter->cvRGBA, filter->ch1, filter->ch2, filter->ch3, NULL);

  /*  in any case, copy the fg/bg mask into the alpha channel of the output image */
  cvMerge (filter->ch1, filter->ch2, filter->ch3, filter->cvFG, filter->cvRGBA);


  return GST_FLOW_OK;
}
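
The cvRGBA, cvRGB, cvYUV, cvFG and ch1..ch3 images used above are allocated elsewhere in the element. A rough sketch of that setup with the legacy OpenCV C API, assuming the field names seen in the code (illustrative only):

/* Sketch only: how the IplImage handles could be created from the
 * negotiated video info; the element's real set_info may differ. */
static gboolean
gst_segmentation_setup_images_sketch (GstSegmentation * filter,
    GstVideoInfo * info)
{
  CvSize size = cvSize (GST_VIDEO_INFO_WIDTH (info),
      GST_VIDEO_INFO_HEIGHT (info));

  filter->width = size.width;
  filter->height = size.height;

  /* header only: imageData/widthStep point at the mapped frame per buffer */
  filter->cvRGBA = cvCreateImageHeader (size, IPL_DEPTH_8U, 4);

  /* working images owned by the filter */
  filter->cvRGB = cvCreateImage (size, IPL_DEPTH_8U, 3);
  filter->cvYUV = cvCreateImage (size, IPL_DEPTH_8U, 3);
  filter->cvFG = cvCreateImage (size, IPL_DEPTH_8U, 1);
  filter->ch1 = cvCreateImage (size, IPL_DEPTH_8U, 1);
  filter->ch2 = cvCreateImage (size, IPL_DEPTH_8U, 1);
  filter->ch3 = cvCreateImage (size, IPL_DEPTH_8U, 1);

  return TRUE;
}
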
Code example #26
File: gstcompare.c  Project: 0p1pp1/gst-plugins-bad
static gdouble
gst_compare_ssim (GstCompare * comp, GstBuffer * buf1, GstCaps * caps1,
    GstBuffer * buf2, GstCaps * caps2)
{
  GstVideoInfo info1, info2;
  GstVideoFrame frame1, frame2;
  gint i, comps;
  gdouble cssim[4], ssim, c[4] = { 1.0, 0.0, 0.0, 0.0 };

  if (!caps1)
    goto invalid_input;

  if (!gst_video_info_from_caps (&info1, caps1))
    goto invalid_input;

  if (!caps2)
    goto invalid_input;

  if (!gst_video_info_from_caps (&info2, caps2))
    goto invalid_input;

  if (GST_VIDEO_INFO_FORMAT (&info1) != GST_VIDEO_INFO_FORMAT (&info2) ||
      GST_VIDEO_INFO_WIDTH (&info1) != GST_VIDEO_INFO_WIDTH (&info2) ||
      GST_VIDEO_INFO_HEIGHT (&info1) != GST_VIDEO_INFO_HEIGHT (&info2))
    return comp->threshold + 1;

  comps = GST_VIDEO_INFO_N_COMPONENTS (&info1);
  /* note that some formats are reported as both yuv and gray */
  for (i = 0; i < comps; ++i)
    c[i] = 1.0;
  /* increase luma weight if yuv */
  if (GST_VIDEO_INFO_IS_YUV (&info1) && (comps > 1))
    c[0] = comps - 1;
  for (i = 0; i < comps; ++i)
    c[i] /= (GST_VIDEO_INFO_IS_YUV (&info1) && (comps > 1)) ?
        2 * (comps - 1) : comps;

  gst_video_frame_map (&frame1, &info1, buf1, GST_MAP_READ);
  gst_video_frame_map (&frame2, &info2, buf2, GST_MAP_READ);

  for (i = 0; i < comps; i++) {
    gint cw, ch, step, stride;

    /* only support most common formats */
    if (GST_VIDEO_INFO_COMP_DEPTH (&info1, i) != 8)
      goto unsupported_input;
    cw = GST_VIDEO_FRAME_COMP_WIDTH (&frame1, i);
    ch = GST_VIDEO_FRAME_COMP_HEIGHT (&frame1, i);
    step = GST_VIDEO_FRAME_COMP_PSTRIDE (&frame1, i);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&frame1, i);

    GST_LOG_OBJECT (comp, "component %d", i);
    cssim[i] = gst_compare_ssim_component (comp,
        GST_VIDEO_FRAME_COMP_DATA (&frame1, i),
        GST_VIDEO_FRAME_COMP_DATA (&frame2, i), cw, ch, step, stride);
    GST_LOG_OBJECT (comp, "ssim[%d] = %f", i, cssim[i]);
  }

  gst_video_frame_unmap (&frame1);
  gst_video_frame_unmap (&frame2);

#ifndef GST_DISABLE_GST_DEBUG
  for (i = 0; i < 4; i++) {
    GST_DEBUG_OBJECT (comp, "ssim[%d] = %f, c[%d] = %f", i, cssim[i], i, c[i]);
  }
#endif

  ssim = cssim[0] * c[0] + cssim[1] * c[1] + cssim[2] * c[2] + cssim[3] * c[3];

  return ssim;

  /* ERRORS */
invalid_input:
  {
    GST_ERROR_OBJECT (comp, "ssim method needs raw video input");
    return 0;
  }
unsupported_input:
  {
    GST_ERROR_OBJECT (comp, "raw video format not supported %" GST_PTR_FORMAT,
        caps1);
    return 0;
  }
}
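
gst_compare_ssim_component is referenced above but not shown. Whatever its exact SSIM math, it has to walk one component of both mapped frames using the cw/ch/step/stride values computed above. A simplified stand-in using mean absolute difference instead of real SSIM (hypothetical name, sketch only):

/* Walks one component of two frames using the packed pixel stride (step)
 * and the row stride, returning a similarity score in [0, 1].
 * This is NOT the real SSIM computation, just the access pattern. */
static gdouble
compare_component_sad (const guint8 * d1, const guint8 * d2,
    gint cw, gint ch, gint step, gint stride)
{
  gint x, y;
  gdouble sum = 0.0;

  for (y = 0; y < ch; y++) {
    const guint8 *row1 = d1 + y * stride;
    const guint8 *row2 = d2 + y * stride;

    for (x = 0; x < cw; x++)
      sum += ABS ((gint) row1[x * step] - (gint) row2[x * step]);
  }

  /* 1.0 means identical, 0.0 means maximally different */
  return 1.0 - sum / ((gdouble) cw * ch * 255.0);
}
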
Code example #27
File: gstdvdec.c  Project: slkwyy/gst-plugins-good
static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstMapInfo map;
  GstVideoFrame frame;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  guint64 cstart = GST_CLOCK_TIME_NONE, cstop = GST_CLOCK_TIME_NONE;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (parent);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  inframe = map.data;

  /* the buffer should be at least the size of one NTSC frame; that is
   * enough to decode the header. */
  if (G_UNLIKELY (map.size < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment.format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (&dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (map.size < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);

  dvdec->interlaced = !dv_is_progressive (dvdec->decoder);

  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  if (gst_pad_check_reconfigure (dvdec->srcpad)) {
    GstCaps *caps;

    caps = gst_pad_get_current_caps (dvdec->srcpad);
    if (!caps)
      goto not_negotiated;

    gst_dvdec_negotiate_pool (dvdec, caps, &dvdec->vinfo);
    gst_caps_unref (caps);
  }

  if (dvdec->need_segment) {
    gst_pad_push_event (dvdec->srcpad, gst_event_new_segment (&dvdec->segment));
    dvdec->need_segment = FALSE;
  }

  ret = gst_buffer_pool_acquire_buffer (dvdec->pool, &outbuf, NULL);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  gst_video_frame_map (&frame, &dvdec->vinfo, outbuf, GST_MAP_WRITE);

  outframe_ptrs[0] = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  outframe_pitches[0] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
    outframe_ptrs[2] = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

    outframe_pitches[1] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);
    outframe_pitches[2] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 2);
  }

  GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer");
  dv_decode_full_frame (dvdec->decoder, inframe,
      e_dv_color_yuv, outframe_ptrs, outframe_pitches);

  gst_video_frame_unmap (&frame);

  GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);

  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf);

  /* FIXME : Compute values when using non-TIME segments,
   * but for the moment make sure we at least don't set bogus values
   */
  if (GST_CLOCK_TIME_IS_VALID (cstart)) {
    GST_BUFFER_TIMESTAMP (outbuf) = cstart;
    if (GST_CLOCK_TIME_IS_VALID (cstop))
      GST_BUFFER_DURATION (outbuf) = cstop - cstart;
  }

  ret = gst_pad_push (dvdec->srcpad, outbuf);

skip:
  dvdec->video_offset++;

done:
  gst_buffer_unmap (buf, &map);
  gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
wrong_size:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Input buffer too small"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
parse_header_error:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Error parsing DV header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
not_negotiated:
  {
    GST_DEBUG_OBJECT (dvdec, "could not negotiate output");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dvdec, "could not allocate buffer");
    goto done;
  }

dropping:
  {
    GST_DEBUG_OBJECT (dvdec,
        "dropping buffer since it's out of the configured segment");
    goto done;
  }
}
Code example #28
static GstBuffer *
gst_rtp_vraw_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
{
  GstRtpVRawDepay *rtpvrawdepay;
  guint8 *payload, *p0, *yp, *up, *vp, *headers;
  guint32 timestamp;
  guint cont, ystride, uvstride, pgroup, payload_len;
  gint width, height, xinc, yinc;
  GstRTPBuffer rtp = { NULL };
  GstVideoFrame *frame;
  gboolean marker;
  GstBuffer *outbuf = NULL;

  rtpvrawdepay = GST_RTP_VRAW_DEPAY (depayload);

  gst_rtp_buffer_map (buf, GST_MAP_READ, &rtp);

  timestamp = gst_rtp_buffer_get_timestamp (&rtp);

  if (timestamp != rtpvrawdepay->timestamp || rtpvrawdepay->outbuf == NULL) {
    GstBuffer *new_buffer;
    GstFlowReturn ret;

    GST_LOG_OBJECT (depayload, "new frame with timestamp %u", timestamp);
    /* new timestamp, flush old buffer and create new output buffer */
    if (rtpvrawdepay->outbuf) {
      gst_video_frame_unmap (&rtpvrawdepay->frame);
      gst_rtp_base_depayload_push (depayload, rtpvrawdepay->outbuf);
      rtpvrawdepay->outbuf = NULL;
    }

    if (gst_pad_check_reconfigure (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload))) {
      GstCaps *caps;

      caps =
          gst_pad_get_current_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload));
      gst_rtp_vraw_depay_negotiate_pool (rtpvrawdepay, caps,
          &rtpvrawdepay->vinfo);
      gst_caps_unref (caps);
    }

    ret =
        gst_buffer_pool_acquire_buffer (rtpvrawdepay->pool, &new_buffer, NULL);

    if (G_UNLIKELY (ret != GST_FLOW_OK))
      goto alloc_failed;

    /* clear timestamp from alloc... */
    GST_BUFFER_TIMESTAMP (new_buffer) = -1;

    if (!gst_video_frame_map (&rtpvrawdepay->frame, &rtpvrawdepay->vinfo,
            new_buffer, GST_MAP_WRITE | GST_VIDEO_FRAME_MAP_FLAG_NO_REF)) {
      gst_buffer_unref (new_buffer);
      goto invalid_frame;
    }

    rtpvrawdepay->outbuf = new_buffer;
    rtpvrawdepay->timestamp = timestamp;
  }

  frame = &rtpvrawdepay->frame;

  g_assert (frame->buffer != NULL);

  /* get pointer and strides of the planes */
  p0 = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  yp = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  up = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  vp = GST_VIDEO_FRAME_COMP_DATA (frame, 2);

  ystride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  uvstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);

  pgroup = rtpvrawdepay->pgroup;
  width = GST_VIDEO_INFO_WIDTH (&rtpvrawdepay->vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&rtpvrawdepay->vinfo);
  xinc = rtpvrawdepay->xinc;
  yinc = rtpvrawdepay->yinc;

  payload = gst_rtp_buffer_get_payload (&rtp);
  payload_len = gst_rtp_buffer_get_payload_len (&rtp);

  if (payload_len < 3)
    goto short_packet;

  /* skip extended seqnum */
  payload += 2;
  payload_len -= 2;

  /* remember header position */
  headers = payload;

  /* find data start */
  do {
    if (payload_len < 6)
      goto short_packet;

    cont = payload[4] & 0x80;

    payload += 6;
    payload_len -= 6;
  } while (cont);

  while (TRUE) {
    guint length, line, offs, plen;
    guint8 *datap;

    /* stop when we run out of data */
    if (payload_len == 0)
      break;

    /* read length and cont. This should work because we iterated the headers
     * above. */
    length = (headers[0] << 8) | headers[1];
    line = ((headers[2] & 0x7f) << 8) | headers[3];
    offs = ((headers[4] & 0x7f) << 8) | headers[5];
    cont = headers[4] & 0x80;
    headers += 6;

    /* length must be a multiple of pgroup */
    if (length % pgroup != 0)
      goto wrong_length;

    if (length > payload_len)
      length = payload_len;

    /* sanity check */
    if (line > (height - yinc)) {
      GST_WARNING_OBJECT (depayload, "skipping line %d: out of range", line);
      goto next;
    }
    if (offs > (width - xinc)) {
      GST_WARNING_OBJECT (depayload, "skipping offset %d: out of range", offs);
      goto next;
    }

    /* calculate the maximum number of bytes we can use per line */
    if (offs + ((length / pgroup) * xinc) > width) {
      plen = ((width - offs) * pgroup) / xinc;
      GST_WARNING_OBJECT (depayload, "clipping length %d, offset %d, plen %d",
          length, offs, plen);
    } else
      plen = length;

    GST_LOG_OBJECT (depayload,
        "writing length %u/%u, line %u, offset %u, remaining %u", plen, length,
        line, offs, payload_len);

    switch (GST_VIDEO_INFO_FORMAT (&rtpvrawdepay->vinfo)) {
      case GST_VIDEO_FORMAT_RGB:
      case GST_VIDEO_FORMAT_RGBA:
      case GST_VIDEO_FORMAT_BGR:
      case GST_VIDEO_FORMAT_BGRA:
      case GST_VIDEO_FORMAT_UYVY:
      case GST_VIDEO_FORMAT_UYVP:
        /* samples are packed just like gstreamer packs them */
        offs /= xinc;
        datap = p0 + (line * ystride) + (offs * pgroup);

        memcpy (datap, payload, plen);
        break;
      case GST_VIDEO_FORMAT_AYUV:
      {
        gint i;
        guint8 *p;

        datap = p0 + (line * ystride) + (offs * 4);
        p = payload;

        /* samples are packed in order Cb-Y-Cr for both interlaced and
         * progressive frames */
        for (i = 0; i < plen; i += pgroup) {
          *datap++ = 0;
          *datap++ = p[1];
          *datap++ = p[0];
          *datap++ = p[2];
          p += pgroup;
        }
        break;
      }
      case GST_VIDEO_FORMAT_I420:
      {
        gint i;
        guint uvoff;
        guint8 *yd1p, *yd2p, *udp, *vdp, *p;

        yd1p = yp + (line * ystride) + (offs);
        yd2p = yd1p + ystride;
        uvoff = (line / yinc * uvstride) + (offs / xinc);

        udp = up + uvoff;
        vdp = vp + uvoff;
        p = payload;

        /* line 0/1: Y00-Y01-Y10-Y11-Cb00-Cr00 Y02-Y03-Y12-Y13-Cb01-Cr01 ...  */
        for (i = 0; i < plen; i += pgroup) {
          *yd1p++ = p[0];
          *yd1p++ = p[1];
          *yd2p++ = p[2];
          *yd2p++ = p[3];
          *udp++ = p[4];
          *vdp++ = p[5];
          p += pgroup;
        }
        break;
      }
      case GST_VIDEO_FORMAT_Y41B:
      {
        gint i;
        guint uvoff;
        guint8 *ydp, *udp, *vdp, *p;

        ydp = yp + (line * ystride) + (offs);
        uvoff = (line / yinc * uvstride) + (offs / xinc);

        udp = up + uvoff;
        vdp = vp + uvoff;
        p = payload;

        /* Samples are packed in order Cb0-Y0-Y1-Cr0-Y2-Y3 for both interlaced
         * and progressive scan lines */
        for (i = 0; i < plen; i += pgroup) {
          *udp++ = p[0];
          *ydp++ = p[1];
          *ydp++ = p[2];
          *vdp++ = p[3];
          *ydp++ = p[4];
          *ydp++ = p[5];
          p += pgroup;
        }
        break;
      }
      default:
        goto unknown_sampling;
    }

  next:
    if (!cont)
      break;

    payload += length;
    payload_len -= length;
  }

  marker = gst_rtp_buffer_get_marker (&rtp);
  gst_rtp_buffer_unmap (&rtp);

  if (marker) {
    GST_LOG_OBJECT (depayload, "marker, flushing frame");
    gst_video_frame_unmap (&rtpvrawdepay->frame);
    outbuf = rtpvrawdepay->outbuf;
    rtpvrawdepay->outbuf = NULL;
    rtpvrawdepay->timestamp = -1;
  }
  return outbuf;

  /* ERRORS */
unknown_sampling:
  {
    GST_ELEMENT_ERROR (depayload, STREAM, FORMAT,
        (NULL), ("unimplemented sampling"));
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
alloc_failed:
  {
    GST_WARNING_OBJECT (depayload, "failed to alloc output buffer");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
invalid_frame:
  {
    GST_ERROR_OBJECT (depayload, "could not map video frame");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
wrong_length:
  {
    GST_WARNING_OBJECT (depayload, "length not multiple of pgroup");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
short_packet:
  {
    GST_WARNING_OBJECT (depayload, "short packet");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
}
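
gst_rtp_vraw_depay_negotiate_pool is referenced above but not shown. A minimal sketch of a typical pool negotiation for the srcpad caps, assuming the usual ALLOCATION query plus a GstVideoBufferPool fallback (illustrative only):

/* Sketch only: propose or create a buffer pool sized for the video info. */
static void
negotiate_pool_sketch (GstRtpVRawDepay * rtpvrawdepay, GstCaps * caps,
    GstVideoInfo * info)
{
  GstQuery *query;
  GstBufferPool *pool = NULL;
  GstStructure *config;
  guint size = GST_VIDEO_INFO_SIZE (info);

  query = gst_query_new_allocation (caps, TRUE);
  if (gst_pad_peer_query (GST_RTP_BASE_DEPAYLOAD_SRCPAD (rtpvrawdepay), query)
      && gst_query_get_n_allocation_pools (query) > 0) {
    /* use the pool proposed downstream */
    gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, NULL, NULL);
  }
  gst_query_unref (query);

  if (pool == NULL)
    pool = gst_video_buffer_pool_new ();

  config = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (config, caps, size, 0, 0);
  gst_buffer_pool_set_config (pool, config);
  gst_buffer_pool_set_active (pool, TRUE);

  if (rtpvrawdepay->pool)
    gst_object_unref (rtpvrawdepay->pool);
  rtpvrawdepay->pool = pool;
}
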
Code example #29
File: gstzbar.c  Project: ylatuya/gst-plugins-bad
static GstFlowReturn
gst_zbar_transform_frame_ip (GstVideoFilter * vfilter, GstVideoFrame * frame)
{
  GstZBar *zbar = GST_ZBAR (vfilter);
  gpointer data;
  gint stride, height;
  zbar_image_t *image;
  const zbar_symbol_t *symbol;
  int n;

  image = zbar_image_create ();

  /* all formats we support start with an 8-bit Y plane. zbar doesn't need
   * to know about the chroma plane(s) */
  data = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  zbar_image_set_format (image, GST_MAKE_FOURCC ('Y', '8', '0', '0'));
  zbar_image_set_size (image, stride, height);
  zbar_image_set_data (image, (gpointer) data, stride * height, NULL);

  /* scan the image for barcodes */
  n = zbar_scan_image (zbar->scanner, image);
  if (n == 0)
    goto out;

  /* extract results */
  symbol = zbar_image_first_symbol (image);
  for (; symbol; symbol = zbar_symbol_next (symbol)) {
    zbar_symbol_type_t typ = zbar_symbol_get_type (symbol);
    const char *data = zbar_symbol_get_data (symbol);
    gint quality = zbar_symbol_get_quality (symbol);

    GST_DEBUG_OBJECT (zbar, "decoded %s symbol \"%s\" at quality %d",
        zbar_get_symbol_name (typ), data, quality);

    if (zbar->cache && zbar_symbol_get_count (symbol) != 0)
      continue;

    if (zbar->message) {
      GstMessage *m;
      GstStructure *s;

      /* post a message */
      s = gst_structure_new ("barcode",
          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP (frame->buffer),
          "type", G_TYPE_STRING, zbar_get_symbol_name (typ),
          "symbol", G_TYPE_STRING, data, "quality", G_TYPE_INT, quality, NULL);
      m = gst_message_new_element (GST_OBJECT (zbar), s);
      gst_element_post_message (GST_ELEMENT (zbar), m);
    }
  }

out:
  /* clean up */
  zbar_image_scanner_recycle_image (zbar->scanner, image);
  zbar_image_destroy (image);

  return GST_FLOW_OK;
}
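
The zbar->scanner used above is created outside this function. A minimal sketch of the setup and teardown it implies, assuming default symbologies are enabled (hypothetical function names):

/* Sketch only: create and destroy the zbar image scanner. */
static void
zbar_scanner_setup_sketch (GstZBar * zbar)
{
  zbar->scanner = zbar_image_scanner_create ();
  /* enable all supported symbologies with default settings */
  zbar_image_scanner_set_config (zbar->scanner, ZBAR_NONE, ZBAR_CFG_ENABLE, 1);
}

static void
zbar_scanner_teardown_sketch (GstZBar * zbar)
{
  zbar_image_scanner_destroy (zbar->scanner);
  zbar->scanner = NULL;
}
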
Code example #30
static GstFlowReturn
gst_rtp_vraw_pay_handle_buffer (GstRTPBasePayload * payload, GstBuffer * buffer)
{
  GstRtpVRawPay *rtpvrawpay;
  GstFlowReturn ret = GST_FLOW_OK;
  guint line, offset;
  guint8 *yp, *up, *vp;
  guint ystride, uvstride;
  guint pgroup;
  guint mtu;
  guint width, height;
  gint field;
  GstVideoFrame frame;
  gint interlaced;
  GstRTPBuffer rtp = { NULL, };

  rtpvrawpay = GST_RTP_VRAW_PAY (payload);

  gst_video_frame_map (&frame, &rtpvrawpay->vinfo, buffer, GST_MAP_READ);

  GST_LOG_OBJECT (rtpvrawpay, "new frame of %" G_GSIZE_FORMAT " bytes",
      gst_buffer_get_size (buffer));

  /* get pointer and strides of the planes */
  yp = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  up = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
  vp = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

  ystride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
  uvstride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);

  mtu = GST_RTP_BASE_PAYLOAD_MTU (payload);

  /* amount of bytes for one pixel */
  pgroup = rtpvrawpay->pgroup;
  width = GST_VIDEO_INFO_WIDTH (&rtpvrawpay->vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&rtpvrawpay->vinfo);

  interlaced = GST_VIDEO_INFO_IS_INTERLACED (&rtpvrawpay->vinfo);

  /* start with line 0, offset 0 */
  for (field = 0; field < 1 + interlaced; field++) {
    line = field;
    offset = 0;

    /* write all lines */
    while (line < height) {
      guint left;
      GstBuffer *out;
      guint8 *outdata, *headers;
      gboolean next_line;
      guint length, cont, pixels;

      /* get the max allowed payload length; we try to fill the complete MTU */
      left = gst_rtp_buffer_calc_payload_len (mtu, 0, 0);
      out = gst_rtp_buffer_new_allocate (left, 0, 0);

      if (field == 0) {
        GST_BUFFER_TIMESTAMP (out) = GST_BUFFER_TIMESTAMP (buffer);
      } else {
        GST_BUFFER_TIMESTAMP (out) = GST_BUFFER_TIMESTAMP (buffer) +
            GST_BUFFER_DURATION (buffer) / 2;
      }

      gst_rtp_buffer_map (out, GST_MAP_WRITE, &rtp);
      outdata = gst_rtp_buffer_get_payload (&rtp);

      GST_LOG_OBJECT (rtpvrawpay, "created buffer of size %u for MTU %u", left,
          mtu);

      /*
       *   0                   1                   2                   3
       *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |   Extended Sequence Number    |            Length             |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |F|          Line No            |C|           Offset            |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |            Length             |F|          Line No            |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |C|           Offset            |                               .
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               .
       *  .                                                               .
       *  .                 Two (partial) lines of video data             .
       *  .                                                               .
       *  +---------------------------------------------------------------+
       */

      /* need 2 bytes for the extended sequence number */
      *outdata++ = 0;
      *outdata++ = 0;
      left -= 2;

      /* the headers start here */
      headers = outdata;

      /* while we can fit at least one header and one pixel */
      while (left > (6 + pgroup)) {
        /* we need a 6 bytes header */
        left -= 6;

        /* get how many bytes we need for the remaining pixels */
        pixels = width - offset;
        length = (pixels * pgroup) / rtpvrawpay->xinc;

        if (left >= length) {
          /* pixels and header fit completely, we will write them and skip to the
           * next line. */
          next_line = TRUE;
        } else {
          /* line does not fit completely, see how many pixels fit */
          pixels = (left / pgroup) * rtpvrawpay->xinc;
          length = (pixels * pgroup) / rtpvrawpay->xinc;
          next_line = FALSE;
        }
        GST_LOG_OBJECT (rtpvrawpay, "filling %u bytes in %u pixels", length,
            pixels);
        left -= length;

        /* write length */
        *outdata++ = (length >> 8) & 0xff;
        *outdata++ = length & 0xff;

        /* write line no */
        *outdata++ = ((line >> 8) & 0x7f) | ((field << 7) & 0x80);
        *outdata++ = line & 0xff;

        if (next_line) {
          /* go to the next line; we do this here to make the check below easier */
          line += rtpvrawpay->yinc;
        }

        /* calculate continuation marker */
        cont = (left > (6 + pgroup) && line < height) ? 0x80 : 0x00;

        /* write offset and continuation marker */
        *outdata++ = ((offset >> 8) & 0x7f) | cont;
        *outdata++ = offset & 0xff;

        if (next_line) {
          /* reset offset */
          offset = 0;
          GST_LOG_OBJECT (rtpvrawpay, "go to next line %u", line);
        } else {
          offset += pixels;
          GST_LOG_OBJECT (rtpvrawpay, "next offset %u", offset);
        }

        if (!cont)
          break;
      }
      GST_LOG_OBJECT (rtpvrawpay, "consumed %u bytes",
          (guint) (outdata - headers));

      /* second pass, read headers and write the data */
      while (TRUE) {
        guint offs, lin;

        /* read length and cont */
        length = (headers[0] << 8) | headers[1];
        lin = ((headers[2] & 0x7f) << 8) | headers[3];
        offs = ((headers[4] & 0x7f) << 8) | headers[5];
        cont = headers[4] & 0x80;
        pixels = length / pgroup;
        headers += 6;

        GST_LOG_OBJECT (payload,
            "writing length %u, line %u, offset %u, cont %d", length, lin, offs,
            cont);

        switch (GST_VIDEO_INFO_FORMAT (&rtpvrawpay->vinfo)) {
          case GST_VIDEO_FORMAT_RGB:
          case GST_VIDEO_FORMAT_RGBA:
          case GST_VIDEO_FORMAT_BGR:
          case GST_VIDEO_FORMAT_BGRA:
          case GST_VIDEO_FORMAT_UYVY:
          case GST_VIDEO_FORMAT_UYVP:
            offs /= rtpvrawpay->xinc;
            memcpy (outdata, yp + (lin * ystride) + (offs * pgroup), length);
            outdata += length;
            break;
          case GST_VIDEO_FORMAT_AYUV:
          {
            gint i;
            guint8 *datap;

            datap = yp + (lin * ystride) + (offs * 4);

            for (i = 0; i < pixels; i++) {
              *outdata++ = datap[2];
              *outdata++ = datap[1];
              *outdata++ = datap[3];
              datap += 4;
            }
            break;
          }
          case GST_VIDEO_FORMAT_I420:
          {
            gint i;
            guint uvoff;
            guint8 *yd1p, *yd2p, *udp, *vdp;

            yd1p = yp + (lin * ystride) + (offs);
            yd2p = yd1p + ystride;
            uvoff =
                (lin / rtpvrawpay->yinc * uvstride) + (offs / rtpvrawpay->xinc);
            udp = up + uvoff;
            vdp = vp + uvoff;

            for (i = 0; i < pixels; i++) {
              *outdata++ = *yd1p++;
              *outdata++ = *yd1p++;
              *outdata++ = *yd2p++;
              *outdata++ = *yd2p++;
              *outdata++ = *udp++;
              *outdata++ = *vdp++;
            }
            break;
          }
          case GST_VIDEO_FORMAT_Y41B:
          {
            gint i;
            guint uvoff;
            guint8 *ydp, *udp, *vdp;

            ydp = yp + (lin * ystride) + offs;
            uvoff =
                (lin / rtpvrawpay->yinc * uvstride) + (offs / rtpvrawpay->xinc);
            udp = up + uvoff;
            vdp = vp + uvoff;

            for (i = 0; i < pixels; i++) {
              *outdata++ = *udp++;
              *outdata++ = *ydp++;
              *outdata++ = *ydp++;
              *outdata++ = *vdp++;
              *outdata++ = *ydp++;
              *outdata++ = *ydp++;
            }
            break;
          }
          default:
            gst_rtp_buffer_unmap (&rtp);
            gst_buffer_unref (out);
            goto unknown_sampling;
        }

        if (!cont)
          break;
      }

      if (line >= height) {
        GST_LOG_OBJECT (rtpvrawpay, "field/frame complete, set marker");
        gst_rtp_buffer_set_marker (&rtp, TRUE);
      }
      gst_rtp_buffer_unmap (&rtp);
      if (left > 0) {
        GST_LOG_OBJECT (rtpvrawpay, "we have %u bytes left", left);
        gst_buffer_resize (out, 0, gst_buffer_get_size (out) - left);
      }

      /* push buffer */
      ret = gst_rtp_base_payload_push (payload, out);
    }

  }

  gst_video_frame_unmap (&frame);
  gst_buffer_unref (buffer);

  return ret;

  /* ERRORS */
unknown_sampling:
  {
    GST_ELEMENT_ERROR (payload, STREAM, FORMAT,
        (NULL), ("unimplemented sampling"));
    gst_video_frame_unmap (&frame);
    gst_buffer_unref (buffer);
    return GST_FLOW_NOT_SUPPORTED;
  }
}
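
Neither RFC 4175 example above shows how pgroup, xinc and yinc are derived. The values the pay/depay loops imply for each format, written out as a sketch (the real elements compute them during caps negotiation, and support more formats than are listed here):

/* Sketch only: bytes per pixel group and the pixel/line increments it covers. */
static void
vraw_pack_params_sketch (GstVideoFormat format,
    guint * pgroup, guint * xinc, guint * yinc)
{
  *xinc = *yinc = 1;

  switch (format) {
    case GST_VIDEO_FORMAT_RGB:
    case GST_VIDEO_FORMAT_BGR:
      *pgroup = 3;              /* 3 bytes per pixel */
      break;
    case GST_VIDEO_FORMAT_RGBA:
    case GST_VIDEO_FORMAT_BGRA:
      *pgroup = 4;              /* 4 bytes per pixel */
      break;
    case GST_VIDEO_FORMAT_AYUV:
      *pgroup = 3;              /* Cb-Y-Cr per pixel on the wire */
      break;
    case GST_VIDEO_FORMAT_UYVY:
      *pgroup = 4;              /* 4 bytes cover 2 horizontal pixels */
      *xinc = 2;
      break;
    case GST_VIDEO_FORMAT_I420:
      *pgroup = 6;              /* 6 bytes cover a 2x2 block */
      *xinc = 2;
      *yinc = 2;
      break;
    case GST_VIDEO_FORMAT_Y41B:
      *pgroup = 6;              /* 6 bytes cover 4 horizontal pixels */
      *xinc = 4;
      break;
    default:
      *pgroup = 0;              /* unsupported in this sketch */
      break;
  }
}
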