Code Example #1
File: gstvp8enc.c Project: PeterXu/gst-mobile
static vpx_image_t *
gst_vp8_enc_buffer_to_image (GstVP8Enc * enc, GstBuffer * buffer)
{
  vpx_image_t *image = g_slice_new0 (vpx_image_t);
  guint8 *data = GST_BUFFER_DATA (buffer);
  GstVideoState *state = &GST_BASE_VIDEO_CODEC (enc)->state;

  image->fmt = VPX_IMG_FMT_I420;
  image->bps = 12;
  image->x_chroma_shift = image->y_chroma_shift = 1;
  image->img_data = data;
  image->w = image->d_w = state->width;
  image->h = image->d_h = state->height;

  image->stride[VPX_PLANE_Y] =
      gst_video_format_get_row_stride (state->format, 0, state->width);
  image->stride[VPX_PLANE_U] =
      gst_video_format_get_row_stride (state->format, 1, state->width);
  image->stride[VPX_PLANE_V] =
      gst_video_format_get_row_stride (state->format, 2, state->width);
  image->planes[VPX_PLANE_Y] =
      data + gst_video_format_get_component_offset (state->format, 0,
      state->width, state->height);
  image->planes[VPX_PLANE_U] =
      data + gst_video_format_get_component_offset (state->format, 1,
      state->width, state->height);
  image->planes[VPX_PLANE_V] =
      data + gst_video_format_get_component_offset (state->format, 2,
      state->width, state->height);

  return image;
}
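For reference, the strides and offsets queried above are pure functions of the format and frame size. A minimal sketch of the I420 case (assuming GStreamer 0.10's layout rules of 4-byte row alignment and rounding odd sizes up to even; the helper and macro names are ours):

/* Sketch: what gst_video_format_get_row_stride()/get_component_offset()
 * work out to for I420 under the assumed 0.10 conventions.
 * Planes are laid out Y, then U, then V. */
#define ROUND_UP_2(x) (((x) + 1) & ~1)
#define ROUND_UP_4(x) (((x) + 3) & ~3)

static void
i420_layout (gint width, gint height,
    gint * y_stride, gint * uv_stride, gint * u_offset, gint * v_offset)
{
  gint y_size = ROUND_UP_4 (width) * ROUND_UP_2 (height);
  gint uv_height = ROUND_UP_2 (height) / 2;

  *y_stride = ROUND_UP_4 (width);
  *uv_stride = ROUND_UP_4 (ROUND_UP_2 (width) / 2);
  *u_offset = y_size;                           /* U plane follows Y */
  *v_offset = y_size + *uv_stride * uv_height;  /* V plane follows U */
}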
Code Example #2
static void
gst_video_balance_planar_yuv (GstVideoBalance * videobalance, guint8 * data)
{
  gint x, y;
  guint8 *ydata;
  guint8 *udata, *vdata;
  gint ystride, ustride, vstride;
  GstVideoFormat format;
  gint width, height;
  gint width2, height2;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  format = videobalance->format;
  width = videobalance->width;
  height = videobalance->height;

  ydata =
      data + gst_video_format_get_component_offset (format, 0, width, height);
  ystride = gst_video_format_get_row_stride (format, 0, width);

  for (y = 0; y < height; y++) {
    guint8 *yptr;

    yptr = ydata + y * ystride;
    for (x = 0; x < width; x++) {
      *yptr = tabley[*yptr];
      yptr++;
    }
  }

  width2 = gst_video_format_get_component_width (format, 1, width);
  height2 = gst_video_format_get_component_height (format, 1, height);

  udata =
      data + gst_video_format_get_component_offset (format, 1, width, height);
  vdata =
      data + gst_video_format_get_component_offset (format, 2, width, height);
  ustride = gst_video_format_get_row_stride (format, 1, width);
  vstride = gst_video_format_get_row_stride (format, 2, width);

  for (y = 0; y < height2; y++) {
    guint8 *uptr, *vptr;
    guint8 u1, v1;

    uptr = udata + y * ustride;
    vptr = vdata + y * vstride;

    for (x = 0; x < width2; x++) {
      u1 = *uptr;
      v1 = *vptr;

      *uptr++ = tableu[u1][v1];
      *vptr++ = tablev[u1][v1];
    }
  }
}
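Note that the luma table above is one-dimensional while the chroma tables are indexed by the (u, v) pair jointly: hue is a rotation and saturation a scaling of the chroma vector around the midpoint 128, so each output component depends on both inputs. A minimal sketch of filling such tables (names and array layout are ours, not videobalance's):

#include <math.h>               /* cos, sin */

/* Sketch: 256x256 chroma lookup tables for a hue rotation (radians) and a
 * saturation factor, centered on the unsigned-chroma midpoint 128. */
static void
fill_chroma_tables (guint8 tableu[256][256], guint8 tablev[256][256],
    gdouble hue, gdouble saturation)
{
  gint u, v;

  for (u = 0; u < 256; u++) {
    for (v = 0; v < 256; v++) {
      gdouble du = u - 128.0, dv = v - 128.0;
      gdouble nu = saturation * (cos (hue) * du - sin (hue) * dv);
      gdouble nv = saturation * (sin (hue) * du + cos (hue) * dv);

      tableu[u][v] = (guint8) CLAMP (nu + 128.5, 0.0, 255.0);
      tablev[u][v] = (guint8) CLAMP (nv + 128.5, 0.0, 255.0);
    }
  }
}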
Code Example #3
GstFlowReturn ofGstVideoUtils::process_buffer(shared_ptr<GstBuffer> _buffer){
	guint size = GST_BUFFER_SIZE (_buffer.get());
	int stride = 0;
	if(pixels.isAllocated() && pixels.getTotalBytes()!=(int)size){
        stride = gst_video_format_get_row_stride( GST_VIDEO_FORMAT_RGB,0, pixels.getWidth());
        if(stride == (pixels.getWidth() * pixels.getBytesPerPixel())) {
            ofLogError("ofGstVideoUtils") << "buffer_cb(): error on new buffer, buffer size: " << size << " != init size: " << pixels.getTotalBytes();
            return GST_FLOW_ERROR;
        }
	}
	mutex.lock();
	if(pixels.isAllocated()){
		backBuffer = _buffer;
        if(stride > 0) {
            backPixels.setFromAlignedPixels(GST_BUFFER_DATA (backBuffer.get()),pixels.getWidth(),pixels.getHeight(),pixels.getPixelFormat(),stride);
        }
        else {
            backPixels.setFromExternalPixels(GST_BUFFER_DATA (backBuffer.get()),pixels.getWidth(),pixels.getHeight(),pixels.getPixelFormat());
            eventPixels.setFromExternalPixels(GST_BUFFER_DATA (backBuffer.get()),pixels.getWidth(),pixels.getHeight(),pixels.getPixelFormat());
        }
		bBackPixelsChanged=true;
		mutex.unlock();
        if(stride == 0) {
        	ofNotifyEvent(prerollEvent,eventPixels);
        }
	}else{
		if(isStream && appsink){
			appsink->on_stream_prepared();
		}else{
			ofLogError("ofGstVideoUtils") << "preroll_cb(): received a preroll without allocation";
		}
		mutex.unlock();
	}
	return GST_FLOW_OK;
}
Code Example #4
File: gstgaussblur.c Project: luisbg/gaudi_effects
static gboolean
gauss_blur_set_caps (GstBaseTransform * btrans,
    GstCaps * incaps, GstCaps * outcaps)
{
  GaussBlur *gb = GAUSS_BLUR (btrans);
  GstStructure *structure;
  GstVideoFormat format;
  guint32 n_elems;

  structure = gst_caps_get_structure (incaps, 0);
  g_return_val_if_fail (structure != NULL, FALSE);

  if (!gst_video_format_parse_caps (incaps, &format, &gb->width, &gb->height))
    return FALSE;

  /* get stride */
  gb->stride = gst_video_format_get_row_stride (format, 0, gb->width);

  n_elems = gb->stride * gb->height;

  gb->tempim = g_malloc (sizeof (gfloat) * n_elems);
  //gb->smoothedim = g_malloc (sizeof (guint16) * n_elems);

  return TRUE;
}
Code Example #5
File: gstgamma.c Project: luisbg/gst-plugins-good
static void
gst_gamma_packed_yuv_ip (GstGamma * gamma, guint8 * data)
{
  gint i, j, height;
  gint width, row_stride, row_wrap;
  gint pixel_stride;
  const guint8 *table = gamma->gamma_table;

  data = data + gst_video_format_get_component_offset (gamma->format, 0,
      gamma->width, gamma->height);

  width = gst_video_format_get_component_width (gamma->format, 0, gamma->width);
  height = gst_video_format_get_component_height (gamma->format, 0,
      gamma->height);
  row_stride = gst_video_format_get_row_stride (gamma->format, 0, gamma->width);
  pixel_stride = gst_video_format_get_pixel_stride (gamma->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      *data = table[*data];
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
Code Example #6
static void
gst_color_effects_transform_rgb (GstColorEffects * filter, guint8 * data)
{
  gint i, j;
  gint width, height;
  gint pixel_stride, row_stride, row_wrap;
  guint32 r, g, b;
  guint32 luma;
  gint offsets[3];

  /* videoformat fun copied from videobalance */

  offsets[0] = gst_video_format_get_component_offset (filter->format, 0,
      filter->width, filter->height);
  offsets[1] = gst_video_format_get_component_offset (filter->format, 1,
      filter->width, filter->height);
  offsets[2] = gst_video_format_get_component_offset (filter->format, 2,
      filter->width, filter->height);

  width =
      gst_video_format_get_component_width (filter->format, 0, filter->width);
  height =
      gst_video_format_get_component_height (filter->format, 0, filter->height);
  row_stride =
      gst_video_format_get_row_stride (filter->format, 0, filter->width);
  pixel_stride = gst_video_format_get_pixel_stride (filter->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  /* transform */

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];
      if (filter->map_luma) {
        /* BT. 709 coefficients in B8 fixed point */
        /* 0.2126 R + 0.7152 G + 0.0722 B */
        luma = ((r << 8) * 54) + ((g << 8) * 183) + ((b << 8) * 19);
        luma >>= 16;            /* get integer part */
        luma *= 3;              /* times 3 to retrieve the correct pixel from
                                 * the lut */
        /* map luma to lookup table */
        /* src.luma |-> table[luma].rgb */
        data[offsets[0]] = filter->table[luma];
        data[offsets[1]] = filter->table[luma + 1];
        data[offsets[2]] = filter->table[luma + 2];
      } else {
        /* map each color component to the correspondent lut color */
        /* src.r |-> table[r].r */
        /* src.g |-> table[g].g */
        /* src.b |-> table[b].b */
        data[offsets[0]] = filter->table[r * 3];
        data[offsets[1]] = filter->table[g * 3 + 1];
        data[offsets[2]] = filter->table[b * 3 + 2];
      }
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
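The fixed-point luma computation above is worth unpacking: the BT.709 weights 0.2126, 0.7152 and 0.0722 are approximated by 54/256, 183/256 and 19/256, and the extra << 8 carries eight more fractional bits through the sum. As a standalone sketch (function name is ours):

/* Sketch: BT.709 luma in fixed point, matching the arithmetic above.
 * Each product carries 16 fractional bits, dropped by the final shift. */
static guint32
bt709_luma (guint32 r, guint32 g, guint32 b)
{
  guint32 luma = ((r << 8) * 54) + ((g << 8) * 183) + ((b << 8) * 19);

  return luma >> 16;
}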
Code Example #7
/* Allocate buffer and copy image data into the negotiated YUV format */
static GstFlowReturn
theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
{
  gint width, height, stride;
  GstFlowReturn result;
  int i, plane;
  GstVideoFormat format;
  guint8 *dest, *src;

  switch (dec->info.pixel_fmt) {
    case TH_PF_444:
      format = GST_VIDEO_FORMAT_Y444;
      break;
    case TH_PF_420:
      format = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      format = GST_VIDEO_FORMAT_Y42B;
      break;
    default:
      g_assert_not_reached ();
  }

  result =
      gst_pad_alloc_buffer_and_set_caps (dec->srcpad, GST_BUFFER_OFFSET_NONE,
      gst_video_format_get_size (format, dec->width, dec->height),
      GST_PAD_CAPS (dec->srcpad), out);
  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  for (plane = 0; plane < 3; plane++) {
    width = gst_video_format_get_component_width (format, plane, dec->width);
    height = gst_video_format_get_component_height (format, plane, dec->height);
    stride = gst_video_format_get_row_stride (format, plane, dec->width);

    dest =
        GST_BUFFER_DATA (*out) + gst_video_format_get_component_offset (format,
        plane, dec->width, dec->height);
    src = buf[plane].data;
    src += ((height == dec->height) ? dec->offset_y : dec->offset_y / 2)
        * buf[plane].stride;
    src += (width == dec->width) ? dec->offset_x : dec->offset_x / 2;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += buf[plane].stride;
    }
  }

  return GST_FLOW_OK;
}
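The inner loop above is the canonical stride-aware plane copy: only width bytes per row carry pixels, and source and destination each advance by their own stride. Factored out as a sketch (helper name is ours):

#include <string.h>             /* memcpy */

/* Sketch: copy one image plane between surfaces with different row pitches. */
static void
copy_plane (guint8 * dest, gint dest_stride, const guint8 * src,
    gint src_stride, gint width, gint height)
{
  gint i;

  for (i = 0; i < height; i++) {
    memcpy (dest, src, width);
    dest += dest_stride;
    src += src_stride;
  }
}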
Code Example #8
File: gstvideoscale.c Project: ChinnaSuhas/ossbuild
static void
gst_video_scale_setup_vs_image (VSImage * image, GstVideoFormat format,
    gint component, gint width, gint height, gint b_w, gint b_h, uint8_t * data)
{
  image->real_width =
      gst_video_format_get_component_width (format, component, width);
  image->real_height =
      gst_video_format_get_component_height (format, component, height);
  image->width =
      gst_video_format_get_component_width (format, component, MAX (1,
          width - b_w));
  image->height =
      gst_video_format_get_component_height (format, component, MAX (1,
          height - b_h));
  image->stride = gst_video_format_get_row_stride (format, component, width);

  image->border_top = (image->real_height - image->height) / 2;
  image->border_bottom = image->real_height - image->height - image->border_top;

  if (format == GST_VIDEO_FORMAT_YUY2 || format == GST_VIDEO_FORMAT_YVYU
      || format == GST_VIDEO_FORMAT_UYVY) {
    g_assert (component == 0);

    image->border_left = (image->real_width - image->width) / 2;

    if (image->border_left % 2 == 1)
      image->border_left--;
    image->border_right = image->real_width - image->width - image->border_left;
  } else {
    image->border_left = (image->real_width - image->width) / 2;
    image->border_right = image->real_width - image->width - image->border_left;
  }

  if (format == GST_VIDEO_FORMAT_I420
      || format == GST_VIDEO_FORMAT_YV12
      || format == GST_VIDEO_FORMAT_Y444
      || format == GST_VIDEO_FORMAT_Y42B || format == GST_VIDEO_FORMAT_Y41B) {
    image->real_pixels = data + gst_video_format_get_component_offset (format,
        component, width, height);
  } else {
    g_assert (component == 0);
    image->real_pixels = data;
  }

  image->pixels =
      image->real_pixels + image->border_top * image->stride +
      image->border_left * gst_video_format_get_pixel_stride (format,
      component);
}
Code Example #9
File: gstgamma.c Project: luisbg/gst-plugins-good
static void
gst_gamma_packed_rgb_ip (GstGamma * gamma, guint8 * data)
{
  gint i, j, height;
  gint width, row_stride, row_wrap;
  gint pixel_stride;
  const guint8 *table = gamma->gamma_table;
  gint offsets[3];
  gint r, g, b;
  gint y, u, v;

  offsets[0] = gst_video_format_get_component_offset (gamma->format, 0,
      gamma->width, gamma->height);
  offsets[1] = gst_video_format_get_component_offset (gamma->format, 1,
      gamma->width, gamma->height);
  offsets[2] = gst_video_format_get_component_offset (gamma->format, 2,
      gamma->width, gamma->height);

  width = gst_video_format_get_component_width (gamma->format, 0, gamma->width);
  height = gst_video_format_get_component_height (gamma->format, 0,
      gamma->height);
  row_stride = gst_video_format_get_row_stride (gamma->format, 0, gamma->width);
  pixel_stride = gst_video_format_get_pixel_stride (gamma->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];

      y = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 0, r, g, b);
      u = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 1, r, g, b);
      v = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 2, r, g, b);

      y = table[CLAMP (y, 0, 255)];
      r = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 0, y, u, v);
      g = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 1, y, u, v);
      b = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 2, y, u, v);

      data[offsets[0]] = CLAMP (r, 0, 255);
      data[offsets[1]] = CLAMP (g, 0, 255);
      data[offsets[2]] = CLAMP (b, 0, 255);
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
Code Example #10
static void
gst_ffmpegscale_fill_info(GstFFMpegScale* scale, GstVideoFormat format,
                          guint width, guint height, gint stride[], gint offset[]) {
    gint i;

    for (i = 0; i < 3; i++) {
        stride[i] = gst_video_format_get_row_stride(format, i, width);
        offset[i] = gst_video_format_get_component_offset(format, i, width,
                    height);

        /* stay close to the ffmpeg offset way */
        if (offset[i] < 3) {
            offset[i] = 0;
        }

        GST_DEBUG_OBJECT(scale, "format %d, component %d; stride %d, offset %d",
                         format, i, stride[i], offset[i]);
    }
}
Code Example #11
File: gsttheoraenc.c Project: ChinnaSuhas/ossbuild
static void
theora_enc_init_buffer (th_ycbcr_buffer buf, th_info * info, guint8 * data)
{
  GstVideoFormat format;
  guint i;

  switch (info->pixel_fmt) {
    case TH_PF_444:
      format = GST_VIDEO_FORMAT_Y444;
      break;
    case TH_PF_420:
      format = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      format = GST_VIDEO_FORMAT_Y42B;
      break;
    default:
      g_assert_not_reached ();
  }

  /* According to Theora developer Timothy Terriberry, the Theora 
   * encoder will not use memory outside of pic_width/height, even when
   * the frame size is bigger. The values outside this region will be encoded
   * to default values.
   * Due to this, setting the frame's width/height as the buffer width/height
   * is perfectly ok, even though it does not strictly look ok.
   */
  for (i = 0; i < 3; i++) {
    buf[i].width =
        gst_video_format_get_component_width (format, i, info->frame_width);
    buf[i].height =
        gst_video_format_get_component_height (format, i, info->frame_height);

    buf[i].data =
        data + gst_video_format_get_component_offset (format, i,
        info->pic_width, info->pic_height);
    buf[i].stride =
        gst_video_format_get_row_stride (format, i, info->pic_width);
  }
}
Code Example #12
static gboolean
gst_gdk_pixbuf_sink_set_caps (GstBaseSink * basesink, GstCaps * caps)
{
  GstGdkPixbufSink *sink = GST_GDK_PIXBUF_SINK (basesink);
  GstVideoFormat fmt;
  gint w, h, par_n, par_d;

  GST_LOG_OBJECT (sink, "caps: %" GST_PTR_FORMAT, caps);

  if (!gst_video_format_parse_caps (caps, &fmt, &w, &h)) {
    GST_WARNING_OBJECT (sink, "parse_caps failed");
    return FALSE;
  }

  if (!gst_video_parse_caps_pixel_aspect_ratio (caps, &par_n, &par_d)) {
    GST_LOG_OBJECT (sink, "no pixel aspect ratio");
    return FALSE;
  }

  g_assert ((fmt == GST_VIDEO_FORMAT_RGB &&
          gst_video_format_get_pixel_stride (fmt, 0) == 3) ||
      (fmt == GST_VIDEO_FORMAT_RGBA &&
          gst_video_format_get_pixel_stride (fmt, 0) == 4));

  GST_VIDEO_SINK_WIDTH (sink) = w;
  GST_VIDEO_SINK_HEIGHT (sink) = h;

  sink->rowstride = gst_video_format_get_row_stride (fmt, 0, w);
  sink->has_alpha = (fmt == GST_VIDEO_FORMAT_RGBA);

  sink->par_n = par_n;
  sink->par_d = par_d;

  GST_INFO_OBJECT (sink, "format             : %d", fmt);
  GST_INFO_OBJECT (sink, "width x height     : %d x %d", w, h);
  GST_INFO_OBJECT (sink, "pixel-aspect-ratio : %d/%d", par_d, par_n);

  return TRUE;
}
Code Example #13
File: assrender.c Project: adesurya/gst-mobile
static void
sink_handoff_cb_I420 (GstElement * object, GstBuffer * buffer, GstPad * pad,
    gpointer user_data)
{
  guint *sink_pos = (guint *) user_data;
  gboolean contains_text = (*sink_pos == 1 || *sink_pos == 2);
  guint c, i, j;
  guint8 *data = GST_BUFFER_DATA (buffer);
  gboolean all_red = TRUE;
  guint8 *comp;
  gint comp_stride, comp_width, comp_height;
  const guint8 color[] = { 81, 90, 240 };

  fail_unless_equals_int (GST_BUFFER_SIZE (buffer),
      gst_video_format_get_size (GST_VIDEO_FORMAT_I420, 640, 480));

  for (c = 0; c < 3; c++) {
    comp =
        data + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, c,
        640, 480);
    comp_stride =
        gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, c, 640);
    comp_width =
        gst_video_format_get_component_width (GST_VIDEO_FORMAT_I420, c, 640);
    comp_height =
        gst_video_format_get_component_height (GST_VIDEO_FORMAT_I420, c, 480);

    for (i = 0; i < comp_height; i++) {
      for (j = 0; j < comp_width; j++) {
        all_red = all_red && (comp[i * comp_stride + j] == color[c]);
      }
    }
  }

  fail_unless (contains_text != all_red,
      "Frame %d is incorrect (all red %d, contains text %d)", *sink_pos,
      all_red, contains_text);
  *sink_pos = *sink_pos + 1;
}
Code Example #14
static gboolean
gst_geometric_transform_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstGeometricTransform *gt;
  gboolean ret;
  gint old_width;
  gint old_height;
  GstGeometricTransformClass *klass;

  gt = GST_GEOMETRIC_TRANSFORM_CAST (btrans);
  klass = GST_GEOMETRIC_TRANSFORM_GET_CLASS (gt);

  old_width = gt->width;
  old_height = gt->height;

  ret = gst_video_format_parse_caps (incaps, &gt->format, &gt->width,
      &gt->height);
  if (ret) {
    gt->row_stride = gst_video_format_get_row_stride (gt->format, 0, gt->width);
    gt->pixel_stride = gst_video_format_get_pixel_stride (gt->format, 0);

    /* regenerate the map */
    GST_OBJECT_LOCK (gt);
    if (gt->map == NULL || old_width == 0 || old_height == 0
        || gt->width != old_width || gt->height != old_height) {
      if (klass->prepare_func)
        if (!klass->prepare_func (gt)) {
          GST_OBJECT_UNLOCK (gt);
          return FALSE;
        }
      if (gt->precalc_map)
        gst_geometric_transform_generate_map (gt);
    }
    GST_OBJECT_UNLOCK (gt);
  }
  return ret;
}
Code Example #15
static void
gst_video_flip_planar_yuv (GstVideoFlip * videoflip, guint8 * dest,
    const guint8 * src)
{
  gint x, y;
  guint8 const *s;
  guint8 *d;
  GstVideoFormat format = videoflip->format;
  gint sw = videoflip->from_width;
  gint sh = videoflip->from_height;
  gint dw = videoflip->to_width;
  gint dh = videoflip->to_height;
  gint src_y_stride, src_u_stride, src_v_stride;
  gint src_y_offset, src_u_offset, src_v_offset;
  gint src_y_height, src_u_height, src_v_height;
  gint src_y_width, src_u_width, src_v_width;
  gint dest_y_stride, dest_u_stride, dest_v_stride;
  gint dest_y_offset, dest_u_offset, dest_v_offset;
  gint dest_y_height, dest_u_height, dest_v_height;
  gint dest_y_width, dest_u_width, dest_v_width;

  src_y_stride = gst_video_format_get_row_stride (format, 0, sw);
  src_u_stride = gst_video_format_get_row_stride (format, 1, sw);
  src_v_stride = gst_video_format_get_row_stride (format, 2, sw);

  dest_y_stride = gst_video_format_get_row_stride (format, 0, dw);
  dest_u_stride = gst_video_format_get_row_stride (format, 1, dw);
  dest_v_stride = gst_video_format_get_row_stride (format, 2, dw);

  src_y_offset = gst_video_format_get_component_offset (format, 0, sw, sh);
  src_u_offset = gst_video_format_get_component_offset (format, 1, sw, sh);
  src_v_offset = gst_video_format_get_component_offset (format, 2, sw, sh);

  dest_y_offset = gst_video_format_get_component_offset (format, 0, dw, dh);
  dest_u_offset = gst_video_format_get_component_offset (format, 1, dw, dh);
  dest_v_offset = gst_video_format_get_component_offset (format, 2, dw, dh);

  src_y_width = gst_video_format_get_component_width (format, 0, sw);
  src_u_width = gst_video_format_get_component_width (format, 1, sw);
  src_v_width = gst_video_format_get_component_width (format, 2, sw);

  dest_y_width = gst_video_format_get_component_width (format, 0, dw);
  dest_u_width = gst_video_format_get_component_width (format, 1, dw);
  dest_v_width = gst_video_format_get_component_width (format, 2, dw);

  src_y_height = gst_video_format_get_component_height (format, 0, sh);
  src_u_height = gst_video_format_get_component_height (format, 1, sh);
  src_v_height = gst_video_format_get_component_height (format, 2, sh);

  dest_y_height = gst_video_format_get_component_height (format, 0, dh);
  dest_u_height = gst_video_format_get_component_height (format, 1, dh);
  dest_v_height = gst_video_format_get_component_height (format, 2, dh);

  switch (videoflip->method) {
    case GST_VIDEO_FLIP_METHOD_90R:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - x) * src_y_stride + y];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - x) * src_u_stride + y];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - x) * src_v_stride + y];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_90L:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[x * src_y_stride + (src_y_width - 1 - y)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[x * src_u_stride + (src_u_width - 1 - y)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[x * src_v_stride + (src_v_width - 1 - y)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_180:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - y) * src_y_stride + (src_y_width - 1 - x)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - y) * src_u_stride + (src_u_width - 1 - x)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - y) * src_v_stride + (src_v_width - 1 - x)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_HORIZ:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[y * src_y_stride + (src_y_width - 1 - x)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[y * src_u_stride + (src_u_width - 1 - x)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[y * src_v_stride + (src_v_width - 1 - x)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_VERT:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - y) * src_y_stride + x];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - y) * src_u_stride + x];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - y) * src_v_stride + x];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_TRANS:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] = s[x * src_y_stride + y];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] = s[x * src_u_stride + y];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] = s[x * src_v_stride + y];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_OTHER:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - x) * src_y_stride + (src_y_width - 1 - y)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - x) * src_u_stride + (src_u_width - 1 - y)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - x) * src_v_stride + (src_v_width - 1 - y)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_IDENTITY:
      g_assert_not_reached ();
      break;
    default:
      g_assert_not_reached ();
      break;
  }
}
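Every case above runs the same three per-plane loops and differs only in how the source index is formed. For reference, with w and h the source plane's width and height:

/* d[y][x] = s[...] per method:
 *   90R:   s[h-1-x][y]         90L:   s[x][w-1-y]
 *   180:   s[h-1-y][w-1-x]     HORIZ: s[y][w-1-x]
 *   VERT:  s[h-1-y][x]         TRANS: s[x][y]
 *   OTHER: s[h-1-x][w-1-y]
 */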
Code Example #16
static gboolean
gst_jasper_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
    GstJasperEnc *enc;
    GstVideoFormat format;
    gint width, height;
    gint fps_num, fps_den;
    gint par_num, par_den;
    gint i;

    enc = GST_JASPER_ENC (GST_PAD_PARENT (pad));

    /* get info from caps */
    if (!gst_video_format_parse_caps (caps, &format, &width, &height))
        goto refuse_caps;
    /* optional; pass along if present */
    fps_num = fps_den = -1;
    par_num = par_den = -1;
    gst_video_parse_caps_framerate (caps, &fps_num, &fps_den);
    gst_video_parse_caps_pixel_aspect_ratio (caps, &par_num, &par_den);

    if (width == enc->width && height == enc->height && enc->format == format
            && fps_num == enc->fps_num && fps_den == enc->fps_den
            && par_num == enc->par_num && par_den == enc->par_den)
        return TRUE;

    /* store input description */
    enc->format = format;
    enc->width = width;
    enc->height = height;
    enc->fps_num = fps_num;
    enc->fps_den = fps_den;
    enc->par_num = par_num;
    enc->par_den = par_den;

    /* prepare a cached image description  */
    enc->channels = 3 + (gst_video_format_has_alpha (format) ? 1 : 0);
    for (i = 0; i < enc->channels; ++i) {
        enc->cwidth[i] = gst_video_format_get_component_width (format, i, width);
        enc->cheight[i] = gst_video_format_get_component_height (format, i, height);
        enc->offset[i] = gst_video_format_get_component_offset (format, i, width,
                         height);
        enc->stride[i] = gst_video_format_get_row_stride (format, i, width);
        enc->inc[i] = gst_video_format_get_pixel_stride (format, i);
    }

    if (!gst_jasper_enc_set_src_caps (enc))
        goto setcaps_failed;
    if (!gst_jasper_enc_init_encoder (enc))
        goto setup_failed;

    return TRUE;

    /* ERRORS */
setup_failed:
    {
        GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
        return FALSE;
    }
setcaps_failed:
    {
        GST_WARNING_OBJECT (enc, "Setting src caps failed");
        GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
        return FALSE;
    }
refuse_caps:
    {
        GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
        return FALSE;
    }
}
Code Example #17
gboolean AVSC_CC
_avs_vcf_add_buffer (AVS_VideoCacheFilter *p, GstPad *pad, GstBuffer *inbuf, AVS_ScriptEnvironment *env)
{
  AVS_VideoFrame *buf_ptr;
  AVS_CacheableVideoFrame *cvf;
  AVSynthSink *sink;

  gboolean ret = TRUE;

  guint8 *in_data;
  guint in_size;
  GstClockTime in_timestamp, in_duration, in_running_time;
  gint64 in_offset;
  GstVideoFormat vf;
  gint in_stride0, in_stride1, in_stride2;
  gint offset0, offset1, offset2;
  gint rowsize0, rowsize1, rowsize2;
  gint height0, height1, height2;

  in_data = GST_BUFFER_DATA (inbuf);
  in_size = GST_BUFFER_SIZE (inbuf);
  in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  in_duration = GST_BUFFER_DURATION (inbuf);
  in_offset = GST_BUFFER_OFFSET (inbuf);

  sink = (AVSynthSink *) g_object_get_data (G_OBJECT (pad), "sinkstruct");

  GST_DEBUG ("Video cache %p: locking sinkmutex", (gpointer) p);
  g_mutex_lock (sink->sinkmutex);

  in_running_time = gst_segment_to_running_time (&sink->segment, GST_FORMAT_TIME, in_timestamp);

  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID(sink->first_ts) && GST_CLOCK_TIME_IS_VALID(in_timestamp)))
  {
    sink->first_ts = in_timestamp;
  }

  /* Offset voodoo magic */
  /* No offset on incoming frame */
  if (in_offset == -1)
  {
    /* Do we know offset of the previous frame? */
    if (sink->last_offset > -1)
    {
      in_offset = sink->last_offset + 1;
    }
    else
    {
      /* Try to convert timestamp to offset */
      if (in_timestamp >= 0 && in_running_time >= 0)
      {
        in_offset = gst_util_uint64_scale (in_running_time - sink->first_ts, p->parent.vi.fps_numerator,
            p->parent.vi.fps_denominator * GST_SECOND);
        /* Attempt to round to nearest integer: if the difference is more
         * than 0.5 (less than -0.5), it means that gst_util_uint64_scale()
         * just truncated an integer, while it had to be rounded
         */

        in_offset = in_offset * GST_SECOND - 
            in_running_time * p->parent.vi.fps_numerator / p->parent.vi.fps_denominator <= 
            -0.5 ? in_offset + 1: in_offset;
      }
      else
      {
        GST_ERROR ("Video cache %p: frame offset is unknown", (gpointer) p);
        ret = FALSE;
        goto end;
      }
    }
  }
  /* Offset sanity check */
  if (sink->last_offset > -1)
  {
    /* Non-monotonic offsets */
    if (in_offset < sink->last_offset || in_offset > sink->last_offset + 1)
    {
      GST_WARNING ("Video cache %p: last offset was %" G_GUINT64_FORMAT ", current offset is %" G_GUINT64_FORMAT " - shouldn't it be %" G_GUINT64_FORMAT "?", p, sink->last_offset, in_offset, sink->last_offset + 1);
      in_offset = sink->last_offset + 1;
    }
    else if (in_offset == sink->last_offset)
    {
      GST_WARNING ("Video cache %p: duplicate offsets %" G_GUINT64_FORMAT ", dropping", (gpointer) p, in_offset);
      goto end;
    }
  }

  sink->last_offset = in_offset;

  if (p->size >= p->used_size && !sink->flush)
  {
    GST_DEBUG ("Video cache %p: blocking at frame %" G_GUINT64_FORMAT, (gpointer) p, in_offset);
    while (p->size >= p->used_size && !sink->flush && !sink->seeking)
    {
      GST_DEBUG ("Video cache %p: sleeping while waiting at frame %" G_GUINT64_FORMAT
        ", cache range%" G_GUINT64_FORMAT "+ %" G_GUINT64_FORMAT
        ", size=%" G_GUINT64_FORMAT" , stream lock = %p", (gpointer) p, in_offset, p->rng_from, p->used_size,
        p->size, GST_PAD_GET_STREAM_LOCK (pad));
      g_cond_wait (p->vcache_block_cond, sink->sinkmutex);
      GST_DEBUG ("Video cache %p: woke up while waiting at frame %" G_GUINT64_FORMAT
        ", cache range%" G_GUINT64_FORMAT "+ %" G_GUINT64_FORMAT
        ", size=%" G_GUINT64_FORMAT" , stream lock = %p", (gpointer) p, in_offset, p->rng_from, p->used_size,
        p->size, GST_PAD_GET_STREAM_LOCK (pad));
    }
  }

  /* We've been seeking backwards and the seek wasn't very precise, so
   * we're getting frames previous to the frame we need.
   * Or we're in seek mode and the frame is not the frame we're seeking to.
   * If we've pushed a seek event and it moved the source to a frame after
   * rng_from (i.e. the seek missed), this will turn into infinite loop.
   */
  if (G_UNLIKELY (sink->flush || (in_offset < p->rng_from && !sink->seeking)
          || (sink->seeking && in_offset != p->rng_from)))
  {
    if (sink->flush)
      GST_DEBUG ("Video cache %p: skipping frame %" G_GUINT64_FORMAT " - flushing", (gpointer) p, in_offset);
    else if (in_offset < p->rng_from && !sink->seeking)
      GST_DEBUG ("Video cache %p: skipping frame %" G_GUINT64_FORMAT " < %" G_GUINT64_FORMAT, (gpointer) p, in_offset, p->rng_from);
    else if (sink->seeking && in_offset != p->rng_from)
      GST_DEBUG ("Video cache %p: skipping frame %" G_GUINT64_FORMAT " - seeking to %" G_GUINT64_FORMAT, (gpointer) p, in_offset, p->rng_from);   
    goto end;
  }
  sink->seeking = FALSE;

  gst_avsynth_buf_pad_caps_to_vi (inbuf, pad, GST_BUFFER_CAPS (inbuf), &p->parent.vi);

  gst_video_format_parse_caps (GST_BUFFER_CAPS (inbuf), &vf, NULL, NULL);

  /* Allocate a new frame, with default alignment */
  buf_ptr = _avs_se_vf_new_a (env, &p->vi, AVS_FRAME_ALIGN);

  offset0 = gst_video_format_get_component_offset (vf, 0, p->parent.vi.width, p->parent.vi.height);
  offset1 = gst_video_format_get_component_offset (vf, 1, p->parent.vi.width, p->parent.vi.height);
  offset2 = gst_video_format_get_component_offset (vf, 2, p->parent.vi.width, p->parent.vi.height);

  /* The Spherical Horse in Vacuum: row stride is not guaranteed to match the
   * value returned by this function.
   */
  in_stride0 = gst_video_format_get_row_stride (vf, 0, p->parent.vi.width);
  in_stride1 = gst_video_format_get_row_stride (vf, 1, p->parent.vi.width);
  in_stride2 = gst_video_format_get_row_stride (vf, 2, p->parent.vi.width);

  rowsize0 = gst_video_format_get_component_width (vf, 0, p->parent.vi.width) * gst_video_format_get_pixel_stride (vf, 0);
  rowsize1 = gst_video_format_get_component_width (vf, 1, p->parent.vi.width) * gst_video_format_get_pixel_stride (vf, 1);
  rowsize2 = gst_video_format_get_component_width (vf, 2, p->parent.vi.width) * gst_video_format_get_pixel_stride (vf, 2);

  height0 = gst_video_format_get_component_height (vf, 0, p->parent.vi.height);
  height1 = gst_video_format_get_component_height (vf, 1, p->parent.vi.height);
  height2 = gst_video_format_get_component_height (vf, 2, p->parent.vi.height);

  if (!AVS_IS_PLANAR (&p->parent.vi))
  {
    offset2 = offset1 = offset0;
    in_stride2 = in_stride1 = 0;
    rowsize2 = rowsize1 = 0;
    height2 = height1 = 0;
  }

  _avs_se_bit_blt (env, _avs_vf_get_write_ptr (buf_ptr), _avs_vf_get_pitch (buf_ptr), in_data + offset0, in_stride0, rowsize0, height0);
  // Blit More planes (pitch, rowsize and height should be 0, if none is present)
  _avs_se_bit_blt (env, _avs_vf_get_write_ptr_p (buf_ptr, PLANAR_U), _avs_vf_get_pitch_p (buf_ptr, PLANAR_U), in_data + offset1, in_stride1, rowsize1, height1);
  _avs_se_bit_blt (env, _avs_vf_get_write_ptr_p (buf_ptr, PLANAR_V), _avs_vf_get_pitch_p (buf_ptr, PLANAR_V), in_data + offset2, in_stride2, rowsize2, height2);

  cvf = g_new0 (AVS_CacheableVideoFrame, 1);
  cvf->vf = buf_ptr;
  cvf->touched = FALSE;
  cvf->selfindex = in_offset;
  cvf->countindex = p->framecounter++;
  _avs_vf_set_timestamp (buf_ptr, in_timestamp);
  _avs_vf_set_parity (buf_ptr, p->parent.vi.image_type);

  /* Buffer is full, meaning that a filter is not processing frames
   * fast enough.
   */
  if (G_UNLIKELY (p->used_size <= p->size))
  {
    if (p->size > p->used_size)
      g_critical ("Video cache %p: buffer overflow - %" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT, (gpointer) p, p->used_size, p->size);
    GST_DEBUG ("Video cache %p: cache is full", (gpointer) p);
    /* Cache is relatively small, we can expand it */
    if (G_UNLIKELY (p->touched_last_time * 3 > p->used_size))
    {
      GST_DEBUG ("Video cache %p: cache is relatively small (%" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT "), expanding...", (gpointer) p, p->touched_last_time * 3, p->used_size);
      _avs_vcf_resize (p, p->used_size + 1);
    }
    else
      g_critical ("Video cache %p: cache is overflowing!", (gpointer) p);
  }

  /* It is guaranteed that at this moment we have at least one free unused
   * array element left. At least it should be guaranteed...
   */
  GST_DEBUG ("Video cache %p: cache size = %" G_GUINT64_FORMAT ", adding a buffer %p (%p), offset = %" G_GUINT64_FORMAT, (gpointer) p, p->size, cvf, buf_ptr, in_offset);
  g_ptr_array_index (p->bufs, p->size++) = (gpointer) cvf;

  /* We don't really know the number of frame the other thread is waiting for
   * (or even if it waits at all), so we'll send a signal each time we add
   * a buffer.
   * People told me that calling g_cond_signal once for each frame (60 times
   * a second, unless you're transcoding a video) doesn't make a difference.
   * And transcoding itself is MUCH slower.
   */
  GST_DEBUG ("Video cache %p: signaling newframe", (gpointer) p);
  g_cond_signal (p->vcache_cond);

end:
  g_mutex_unlock (sink->sinkmutex);
 
  GST_DEBUG ("Video cache %p: unlocked sinkmutex", (gpointer) p);

  return ret;
}
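The manual round-to-nearest applied after gst_util_uint64_scale() above could, in later GStreamer 0.10 releases, be replaced by gst_util_uint64_scale_round(), which rounds instead of truncating. A sketch under that assumption (function name is ours):

/* Sketch: convert a running time to a frame offset, rounding to nearest. */
static guint64
running_time_to_offset (GstClockTime running_time, gint fps_n, gint fps_d)
{
  return gst_util_uint64_scale_round (running_time, fps_n,
      (guint64) fps_d * GST_SECOND);
}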
Code Example #18
static GstFlowReturn
gst_jasper_dec_negotiate (GstJasperDec * dec, jas_image_t * image)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gint width, height, channels;
  gint i, j;
  gboolean negotiate = FALSE;
  jas_clrspc_t clrspc;
  GstCaps *allowed_caps, *caps;

  width = jas_image_width (image);
  height = jas_image_height (image);
  channels = jas_image_numcmpts (image);

  GST_LOG_OBJECT (dec, "%d x %d, %d components", width, height, channels);

  /* jp2c bitstream has no real colour space info (kept in container),
   * so decoder may only pretend to know, where it really does not */
  if (!jas_clrspc_isunknown (dec->clrspc)) {
    clrspc = dec->clrspc;
    GST_DEBUG_OBJECT (dec, "forcing container supplied colour space %d",
        clrspc);
    jas_image_setclrspc (image, clrspc);
  } else
    clrspc = jas_image_clrspc (image);

  if (!width || !height || !channels || jas_clrspc_isunknown (clrspc))
    goto fail_image;

  if (dec->width != width || dec->height != height ||
      dec->channels != channels || dec->clrspc != clrspc)
    negotiate = TRUE;

  if (channels != 3)
    goto not_supported;

  for (i = 0; i < channels; i++) {
    gint cheight, cwidth, depth, sgnd;

    cheight = jas_image_cmptheight (image, i);
    cwidth = jas_image_cmptwidth (image, i);
    depth = jas_image_cmptprec (image, i);
    sgnd = jas_image_cmptsgnd (image, i);

    GST_LOG_OBJECT (dec, "image component %d, %dx%d, depth %d, sgnd %d", i,
        cwidth, cheight, depth, sgnd);

    if (depth != 8 || sgnd)
      goto not_supported;

    if (dec->cheight[i] != cheight || dec->cwidth[i] != cwidth) {
      dec->cheight[i] = cheight;
      dec->cwidth[i] = cwidth;
      negotiate = TRUE;
    }
  }

  if (!negotiate && dec->format != GST_VIDEO_FORMAT_UNKNOWN)
    goto done;

  /* clear and refresh to new state */
  flow_ret = GST_FLOW_NOT_NEGOTIATED;
  dec->format = GST_VIDEO_FORMAT_UNKNOWN;
  dec->width = width;
  dec->height = height;
  dec->channels = channels;

  /* retrieve allowed caps, and find the first one that reasonably maps
   * to the parameters of the colourspace */
  caps = gst_pad_get_allowed_caps (dec->srcpad);
  if (!caps) {
    GST_DEBUG_OBJECT (dec, "... but no peer, using template caps");
    /* need to copy because get_allowed_caps returns a ref,
       and get_pad_template_caps doesn't */
    caps = gst_caps_copy (gst_pad_get_pad_template_caps (dec->srcpad));
  }
  /* avoid lists of fourcc, etc */
  allowed_caps = gst_caps_normalize (caps);
  gst_caps_unref (caps);
  caps = NULL;
  GST_LOG_OBJECT (dec, "allowed source caps %" GST_PTR_FORMAT, allowed_caps);

  for (i = 0; i < gst_caps_get_size (allowed_caps); i++) {
    GstVideoFormat format;
    gboolean ok;

    if (caps)
      gst_caps_unref (caps);
    caps = gst_caps_copy_nth (allowed_caps, i);
    /* sigh, ds and _parse_caps need fixed caps for parsing, fixate */
    gst_pad_fixate_caps (dec->srcpad, caps);
    GST_LOG_OBJECT (dec, "checking caps %" GST_PTR_FORMAT, caps);
    if (!gst_video_format_parse_caps (caps, &format, NULL, NULL))
      continue;
    if (gst_video_format_is_rgb (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_RGB) {
      GST_DEBUG_OBJECT (dec, "trying RGB");
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_R))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_G))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_B))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing RGB color component");
        continue;
      }
    } else if (gst_video_format_is_yuv (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_YCBCR) {
      GST_DEBUG_OBJECT (dec, "trying YUV");
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_Y))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CB))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CR))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing YUV color component");
        continue;
      }
    } else
      continue;
    /* match format with validity checks */
    ok = TRUE;
    for (j = 0; j < channels; j++) {
      gint cmpt;

      cmpt = dec->cmpt[j];
      if (dec->cwidth[cmpt] != gst_video_format_get_component_width (format, j,
              width) ||
          dec->cheight[cmpt] != gst_video_format_get_component_height (format,
              j, height))
        ok = FALSE;
    }
    /* commit to this format */
    if (ok) {
      dec->format = format;
      break;
    }
  }

  if (caps)
    gst_caps_unref (caps);
  gst_caps_unref (allowed_caps);

  if (dec->format != GST_VIDEO_FORMAT_UNKNOWN) {
    /* cache some video format properties */
    for (j = 0; j < channels; ++j) {
      dec->offset[j] = gst_video_format_get_component_offset (dec->format, j,
          dec->width, dec->height);
      dec->inc[j] = gst_video_format_get_pixel_stride (dec->format, j);
      dec->stride[j] = gst_video_format_get_row_stride (dec->format, j,
          dec->width);
    }
    dec->image_size = gst_video_format_get_size (dec->format, width, height);
    dec->alpha = gst_video_format_has_alpha (dec->format);

    if (dec->buf)
      g_free (dec->buf);
    dec->buf = g_new0 (glong, dec->width);

    caps = gst_video_format_new_caps (dec->format, dec->width, dec->height,
        dec->framerate_numerator, dec->framerate_denominator, 1, 1);

    GST_DEBUG_OBJECT (dec, "Set format to %d, size to %dx%d", dec->format,
        dec->width, dec->height);

    if (!gst_pad_set_caps (dec->srcpad, caps))
      flow_ret = GST_FLOW_NOT_NEGOTIATED;
    else
      flow_ret = GST_FLOW_OK;

    gst_caps_unref (caps);
  }

done:
  return flow_ret;

  /* ERRORS */
fail_image:
  {
    GST_DEBUG_OBJECT (dec, "Failed to process decoded image.");
    flow_ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
not_supported:
  {
    GST_DEBUG_OBJECT (dec, "Decoded image has unsupported colour space.");
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("Unsupported colorspace"));
    flow_ret = GST_FLOW_ERROR;
    goto done;
  }
}
Code Example #19
static gboolean
gst_deinterlace2_setcaps (GstPad * pad, GstCaps * caps)
{
  gboolean res = TRUE;
  GstDeinterlace2 *self = GST_DEINTERLACE2 (gst_pad_get_parent (pad));
  GstPad *otherpad;
  GstStructure *structure;
  GstVideoFormat fmt;
  guint32 fourcc;
  GstCaps *othercaps;

  otherpad = (pad == self->srcpad) ? self->sinkpad : self->srcpad;

  structure = gst_caps_get_structure (caps, 0);

  res = gst_structure_get_int (structure, "width", &self->frame_width);
  res &= gst_structure_get_int (structure, "height", &self->frame_height);
  res &=
      gst_structure_get_fraction (structure, "framerate", &self->frame_rate_n,
      &self->frame_rate_d);
  res &= gst_structure_get_fourcc (structure, "format", &fourcc);
  /* TODO: get interlaced, field_layout, field_order */
  if (!res)
    goto invalid_caps;

  if (self->fields == GST_DEINTERLACE2_ALL) {
    gint fps_n = self->frame_rate_n, fps_d = self->frame_rate_d;

    if (!gst_fraction_double (&fps_n, &fps_d, otherpad != self->srcpad))
      goto invalid_caps;

    othercaps = gst_caps_copy (caps);

    gst_caps_set_simple (othercaps, "framerate", GST_TYPE_FRACTION, fps_n,
        fps_d, NULL);
  } else {
    othercaps = gst_caps_ref (caps);
  }

  if (!gst_pad_set_caps (otherpad, othercaps))
    goto caps_not_accepted;
  gst_caps_unref (othercaps);

  /* TODO: introduce self->field_stride */
  self->field_height = self->frame_height / 2;

  fmt = gst_video_format_from_fourcc (fourcc);

  /* TODO: only true if fields are subbuffers of interlaced frames,
     change when the buffer-fields concept has landed */
  self->field_stride =
      gst_video_format_get_row_stride (fmt, 0, self->frame_width) * 2;
  self->output_stride =
      gst_video_format_get_row_stride (fmt, 0, self->frame_width);

  /* in bytes */
  self->line_length =
      gst_video_format_get_row_stride (fmt, 0, self->frame_width);
  self->frame_size =
      gst_video_format_get_size (fmt, self->frame_width, self->frame_height);

  if (self->fields == GST_DEINTERLACE2_ALL && otherpad == self->srcpad)
    self->field_duration =
        gst_util_uint64_scale (GST_SECOND, self->frame_rate_d,
        self->frame_rate_n);
  else
    self->field_duration =
        gst_util_uint64_scale (GST_SECOND, self->frame_rate_d,
        2 * self->frame_rate_n);

  GST_DEBUG_OBJECT (self, "Set caps: %" GST_PTR_FORMAT, caps);

done:

  gst_object_unref (self);
  return res;

invalid_caps:
  res = FALSE;
  GST_ERROR_OBJECT (pad, "Invalid caps: %" GST_PTR_FORMAT, caps);
  goto done;

caps_not_accepted:
  res = FALSE;
  GST_ERROR_OBJECT (pad, "Caps not accepted: %" GST_PTR_FORMAT, othercaps);
  gst_caps_unref (othercaps);
  goto done;
}
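The field_stride above is twice the row stride because both fields are interleaved in one frame, so a single field occupies every other line. A sketch of addressing one field's rows under that layout (helper name is ours):

/* Sketch: line n of the top (field 0) or bottom (field 1) field inside an
 * interleaved frame whose rows are row_stride bytes apart. */
static guint8 *
field_line (guint8 * frame, gint row_stride, gint field, gint n)
{
  return frame + field * row_stride + n * 2 * row_stride;
}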
Code Example #20
static GstFlowReturn
gst_patchdetect_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstPatchdetect *patchdetect = GST_PATCHDETECT (trans);
  Frame frame;
  Point *points;
  int i, j;
  int blocks_x, blocks_y;
  int n_points;
  int n_patches;
  Patch *patches;
  guint8 *patchpix;
  int vec1_x, vec1_y;
  int vec2_x, vec2_y;
  Color detected_colors[24];
  gboolean detected = FALSE;

  frame.y = GST_BUFFER_DATA (buf);
  frame.ystride = gst_video_format_get_row_stride (patchdetect->format,
      0, patchdetect->width);
  frame.u =
      frame.y + gst_video_format_get_component_offset (patchdetect->format, 1,
      patchdetect->width, patchdetect->height);
  frame.ustride =
      gst_video_format_get_row_stride (patchdetect->format, 1,
      patchdetect->width);
  frame.v =
      frame.y + gst_video_format_get_component_offset (patchdetect->format, 2,
      patchdetect->width, patchdetect->height);
  frame.vstride =
      gst_video_format_get_row_stride (patchdetect->format, 2,
      patchdetect->width);
  frame.width = patchdetect->width;
  frame.height = patchdetect->height;
  frame.t = patchdetect->t;
  patchdetect->t++;

  blocks_y = (patchdetect->height & (~7)) / 8;
  blocks_x = (patchdetect->width & (~7)) / 8;

  patchpix = g_malloc0 (patchdetect->width * patchdetect->height);
  patches = g_malloc0 (sizeof (Patch) * 256);

  n_patches = 0;
  for (j = 0; j < blocks_y; j += 4) {
    for (i = 0; i < blocks_x; i += 4) {
      Stats block = { 0 };

      get_block_stats (&frame, i * 8, j * 8, &block);

      patches[n_patches].val = n_patches + 2;
      if (block.match) {
        if (patch_check (&frame, patchpix, i * 8, j * 8, 8, 8)) {
          patch_start (&frame, patchpix, patches + n_patches, i * 8, j * 8, 8,
              8);

          patches[n_patches].y = block.y;
          patches[n_patches].u = block.u;
          patches[n_patches].v = block.v;

          patch_grow (&frame, patchpix, patches + n_patches);
          n_patches++;
          g_assert (n_patches < 256);
        }
      }
    }
  }

  {
    int n;

    for (n = 0; n < n_patches; n++) {
      Patch *patch = &patches[n];
      int xsum;
      int ysum;

      if (patch->count > 10000)
        continue;
      patch->valid = TRUE;

      xsum = 0;
      ysum = 0;
      for (j = patch->ymin; j < patch->ymax; j++) {
        for (i = patch->xmin; i < patch->xmax; i++) {
          if (patchpix[j * frame.width + i] != patch->val)
            continue;
          xsum += i;
          ysum += j;
        }
      }

      patch->cen_x = xsum / patch->count;
      patch->cen_y = ysum / patch->count;
    }

  }

  points = g_malloc0 (sizeof (Point) * 1000);
  n_points = 0;

  for (i = 0; i < n_patches; i++) {
    for (j = i + 1; j < n_patches; j++) {
      int dist_x, dist_y;

      if (i == j)
        continue;

      dist_x = patches[i].cen_x - patches[j].cen_x;
      dist_y = patches[i].cen_y - patches[j].cen_y;

      if (dist_x < 0) {
        dist_x = -dist_x;
        dist_y = -dist_y;
      }
      if (ABS (2 * dist_y) < dist_x && dist_x < 100) {
        points[n_points].x = dist_x;
        points[n_points].y = dist_y;
        points[n_points].valid = TRUE;
        points[n_points].patch1 = i;
        points[n_points].patch2 = j;
        n_points++;
        g_assert (n_points < 1000);
      }
    }
  }

  {
    int dist;
    int ave_x = 0, ave_y = 0;
    for (dist = 50; dist >= 10; dist -= 5) {
      int sum_x, sum_y;
      int n_valid;

      sum_x = 0;
      sum_y = 0;
      n_valid = 0;
      for (i = 0; i < n_points; i++) {
        if (!points[i].valid)
          continue;
        sum_x += points[i].x;
        sum_y += points[i].y;
        n_valid++;
      }
      if (n_valid == 0)
        continue;
      ave_x = sum_x / n_valid;
      ave_y = sum_y / n_valid;

      for (i = 0; i < n_points; i++) {
        int d;
        if (!points[i].valid)
          continue;
        d = (points[i].x - ave_x) * (points[i].x - ave_x);
        d += (points[i].y - ave_y) * (points[i].y - ave_y);
        if (d > dist * dist)
          points[i].valid = FALSE;
      }
    }
    vec1_x = ave_x;
    vec1_y = ave_y;
  }

  n_points = 0;
  for (i = 0; i < n_patches; i++) {
    for (j = i + 1; j < n_patches; j++) {
      int dist_x, dist_y;

      if (i == j)
        continue;

      dist_x = patches[i].cen_x - patches[j].cen_x;
      dist_y = patches[i].cen_y - patches[j].cen_y;

      if (dist_y < 0) {
        dist_x = -dist_x;
        dist_y = -dist_y;
      }
      if (ABS (2 * dist_x) < dist_y && dist_y < 100) {
        points[n_points].x = dist_x;
        points[n_points].y = dist_y;
        points[n_points].valid = TRUE;
        points[n_points].patch1 = i;
        points[n_points].patch2 = j;
        n_points++;
        g_assert (n_points < 1000);
      }
    }
  }

  {
    int dist;
    int ave_x = 0, ave_y = 0;
    for (dist = 50; dist >= 10; dist -= 5) {
      int sum_x, sum_y;
      int n_valid;

      sum_x = 0;
      sum_y = 0;
      n_valid = 0;
      for (i = 0; i < n_points; i++) {
        if (!points[i].valid)
          continue;
        sum_x += points[i].x;
        sum_y += points[i].y;
        n_valid++;
      }
      if (n_valid == 0)
        continue;
      ave_x = sum_x / n_valid;
      ave_y = sum_y / n_valid;

      for (i = 0; i < n_points; i++) {
        int d;
        if (!points[i].valid)
          continue;
        d = (points[i].x - ave_x) * (points[i].x - ave_x);
        d += (points[i].y - ave_y) * (points[i].y - ave_y);
        if (d > dist * dist)
          points[i].valid = FALSE;
      }
    }
    vec2_x = ave_x;
    vec2_y = ave_y;
  }

#if 0
  for (i = 0; i < n_points; i++) {
    if (!points[i].valid)
      continue;
    paint_block (&frame, 4 * points[i].x, 240 + 4 * points[i].y, 16);
  }
#endif
#if 0
  paint_block (&frame, 360, 240, 16);
  paint_block (&frame, 360 + vec1_x, 240 + vec1_y, 16);
  paint_block (&frame, 360 + vec2_x, 240 + vec2_y, 16);
#endif

  {
    double m00, m01, m10, m11;
    double det;
    double v1, v2;
    double ave_v1 = 0, ave_v2 = 0;

    det = vec1_x * vec2_y - vec1_y * vec2_x;
    m00 = vec2_y / det;
    m01 = -vec2_x / det;
    m10 = -vec1_y / det;
    m11 = vec1_x / det;

    for (i = 0; i < n_patches - 1; i++) {
      int count = 0;
      double sum_v1 = 0;
      double sum_v2 = 0;

      if (!patches[i].valid)
        continue;

      n_points = 0;
      for (j = i + 1; j < n_patches; j++) {
        int diff_x = patches[j].cen_x - patches[i].cen_x;
        int diff_y = patches[j].cen_y - patches[i].cen_y;

        if (!patches[j].valid)
          continue;

        v1 = diff_x * m00 + diff_y * m01;
        v2 = diff_x * m10 + diff_y * m11;

        if (v1 > -0.5 && v1 < 5.5 && v2 > -0.5 && v2 < 3.5 &&
            ABS (v1 - rint (v1)) < 0.1 && ABS (v2 - rint (v2)) < 0.1) {
          sum_v1 += v1 - rint (v1);
          sum_v2 += v2 - rint (v2);
          count++;
        }
      }
      ave_v1 = sum_v1 / count;
      ave_v2 = sum_v2 / count;

      if (count > 20) {
        int k;
        for (j = 0; j < 4; j++) {
          for (k = 0; k < 6; k++) {
            Stats block;

            int xx;
            int yy;
            xx = patches[i].cen_x + (ave_v1 + k) * vec1_x + (ave_v2 +
                j) * vec2_x;
            yy = patches[i].cen_y + (ave_v1 + k) * vec1_y + (ave_v2 +
                j) * vec2_y;

            get_block_stats (&frame, xx - 4, yy - 4, &block);
            //GST_ERROR("%d %d: %d %d %d", k, j, block.y, block.u, block.v);

            detected_colors[k + j * 6].y = block.y;
            detected_colors[k + j * 6].u = block.u;
            detected_colors[k + j * 6].v = block.v;

            paint_block (&frame, xx - 4, yy - 4, 16);
          }
        }

        detected = TRUE;

#if 0
        for (j = i + 1; j < n_patches; j++) {
          int diff_x = patches[j].cen_x - patches[i].cen_x;
          int diff_y = patches[j].cen_y - patches[i].cen_y;
          int xx;
          int yy;

          if (!patches[j].valid)
            continue;

          v1 = diff_x * m00 + diff_y * m01;
          v2 = diff_x * m10 + diff_y * m11;

          if (v1 > -0.5 && v1 < 5.5 && v2 > -0.5 && v2 < 3.5 &&
              ABS (v1 - rint (v1)) < 0.1 && ABS (v2 - rint (v2)) < 0.1) {
            v1 = rint (v1);
            v2 = rint (v2);
            xx = patches[i].cen_x + (ave_v1 + v1) * vec1_x + (ave_v2 +
                v2) * vec2_x;
            yy = patches[i].cen_y + (ave_v1 + v1) * vec1_y + (ave_v2 +
                v2) * vec2_y;

            paint_block (&frame, patches[j].cen_x, patches[j].cen_y, 128);
            paint_block (&frame, xx, yy, 16);
          }
        }
        paint_block (&frame, patches[i].cen_x, patches[i].cen_y, 240);
#endif
        break;
      }
    }
  }

#define N 10
  if (detected) {
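    /* Fit, per channel, a quadratic polynomial in (Y, U, V) that maps the
     * detected patch colors onto the reference patch_colors, using weighted
     * least squares on the normal equations accumulated below. */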
    int i, j, k;
    int n = N;
    double diff = 0;
    double matrix[10][10] = { {0} };
    double vy[10] = { 0 };
    double vu[10] = { 0 };
    double vv[10] = { 0 };
    double *by = patchdetect->by;
    double *bu = patchdetect->bu;
    double *bv = patchdetect->bv;
    double flip_diff = 0;

    for (i = 0; i < 24; i++) {
      diff += ABS (detected_colors[i].y - patch_colors[i].y);
      diff += ABS (detected_colors[i].u - patch_colors[i].u);
      diff += ABS (detected_colors[i].v - patch_colors[i].v);

      flip_diff += ABS (detected_colors[23 - i].y - patch_colors[i].y);
      flip_diff += ABS (detected_colors[23 - i].u - patch_colors[i].u);
      flip_diff += ABS (detected_colors[23 - i].v - patch_colors[i].v);
    }
    GST_ERROR ("uncorrected error %g (flipped %g)", diff / 24.0,
        flip_diff / 24.0);
    if (flip_diff < diff) {
      for (i = 0; i < 12; i++) {
        Color tmp;
        tmp = detected_colors[i];
        detected_colors[i] = detected_colors[23 - i];
        detected_colors[23 - i] = tmp;
      }
    }

    for (i = 0; i < 24; i++) {
      int dy = detected_colors[i].y - patch_colors[i].y;
      int du = detected_colors[i].u - patch_colors[i].u;
      int dv = detected_colors[i].v - patch_colors[i].v;
      int py = detected_colors[i].y - 128;
      int pu = detected_colors[i].u - 128;
      int pv = detected_colors[i].v - 128;
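      /* double weight for the last six patches (presumably the chart's
       * neutral gray row) */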
      int w = (i < 18) ? 1 : 2;
      double z[10];

      diff += ABS (dy) + ABS (du) + ABS (dv);

      z[0] = 1;
      z[1] = py;
      z[2] = pu;
      z[3] = pv;
      z[4] = py * py;
      z[5] = py * pu;
      z[6] = py * pv;
      z[7] = pu * pu;
      z[8] = pu * pv;
      z[9] = pv * pv;

      for (j = 0; j < n; j++) {
        for (k = 0; k < n; k++) {
          matrix[j][k] += w * z[j] * z[k];
        }

        vy[j] += w * dy * z[j];
        vu[j] += w * du * z[j];
        vv[j] += w * dv * z[j];
      }
    }

    invert_matrix (matrix, n);

    for (i = 0; i < n; i++) {
      by[i] = 0;
      bu[i] = 0;
      bv[i] = 0;
      for (j = 0; j < n; j++) {
        by[i] += matrix[i][j] * vy[j];
        bu[i] += matrix[i][j] * vu[j];
        bv[i] += matrix[i][j] * vv[j];
      }
    }

    //GST_ERROR("a %g %g %g b %g %g %g", ay, au, av, by, bu, bv);

    diff = 0;
    for (i = 0; i < 24; i++) {
      double cy, cu, cv;
      double z[10];
      int py = detected_colors[i].y - 128;
      int pu = detected_colors[i].u - 128;
      int pv = detected_colors[i].v - 128;

      z[0] = 1;
      z[1] = py;
      z[2] = pu;
      z[3] = pv;
      z[4] = py * py;
      z[5] = py * pu;
      z[6] = py * pv;
      z[7] = pu * pu;
      z[8] = pu * pv;
      z[9] = pv * pv;

      cy = 0;
      cu = 0;
      cv = 0;
      for (j = 0; j < n; j++) {
        cy += by[j] * z[j];
        cu += bu[j] * z[j];
        cv += bv[j] * z[j];
      }

      diff += fabs (patch_colors[i].y - (128 + py - cy));
      diff += fabs (patch_colors[i].u - (128 + pu - cu));
      diff += fabs (patch_colors[i].v - (128 + pv - cv));
    }
    GST_ERROR ("average error %g", diff / 24.0);
    patchdetect->valid = 3000;
  }

  if (patchdetect->valid > 0) {
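    /* While a previous fit is still valid, correct every frame: upsample
     * the 4:2:0 chroma to one u/v sample per pixel (u1/v1 for the even
     * output row, u2/v2 for the odd one), correct Y/U/V per pixel, then
     * box-average the corrected chroma back down to 4:2:0. */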
    int n = N;
    guint8 *u1, *u2;
    guint8 *v1, *v2;
    double *by = patchdetect->by;
    double *bu = patchdetect->bu;
    double *bv = patchdetect->bv;

    patchdetect->valid--;
    u1 = g_malloc (frame.width);
    u2 = g_malloc (frame.width);
    v1 = g_malloc (frame.width);
    v2 = g_malloc (frame.width);

    for (j = 0; j < frame.height; j += 2) {
      for (i = 0; i < frame.width / 2; i++) {
        u1[2 * i + 0] = frame.u[(j / 2) * frame.ustride + i];
        u1[2 * i + 1] = u1[2 * i + 0];
        u2[2 * i + 0] = u1[2 * i + 0];
        u2[2 * i + 1] = u1[2 * i + 0];
        v1[2 * i + 0] = frame.v[(j / 2) * frame.vstride + i];
        v1[2 * i + 1] = v1[2 * i + 0];
        v2[2 * i + 0] = v1[2 * i + 0];
        v2[2 * i + 1] = v1[2 * i + 0];
      }
      for (i = 0; i < frame.width; i++) {
        int k;
        double z[10];
        double cy, cu, cv;
        int y, u, v;
        int py, pu, pv;

        y = frame.y[(j + 0) * frame.ystride + i];
        u = u1[i];
        v = v1[i];

        py = y - 128;
        pu = u - 128;
        pv = v - 128;

        z[0] = 1;
        z[1] = py;
        z[2] = pu;
        z[3] = pv;
        z[4] = py * py;
        z[5] = py * pu;
        z[6] = py * pv;
        z[7] = pu * pu;
        z[8] = pu * pv;
        z[9] = pv * pv;

        cy = 0;
        cu = 0;
        cv = 0;
        for (k = 0; k < n; k++) {
          cy += by[k] * z[k];
          cu += bu[k] * z[k];
          cv += bv[k] * z[k];
        }

        frame.y[(j + 0) * frame.ystride + i] = CLAMP (rint (y - cy), 0, 255);
        u1[i] = CLAMP (rint (u - cu), 0, 255);
        v1[i] = CLAMP (rint (v - cv), 0, 255);

        y = frame.y[(j + 1) * frame.ystride + i];
        u = u2[i];
        v = v2[i];

        py = y - 128;
        pu = u - 128;
        pv = v - 128;

        z[0] = 1;
        z[1] = py;
        z[2] = pu;
        z[3] = pv;
        z[4] = py * py;
        z[5] = py * pu;
        z[6] = py * pv;
        z[7] = pu * pu;
        z[8] = pu * pv;
        z[9] = pv * pv;

        cy = 0;
        cu = 0;
        cv = 0;
        for (k = 0; k < n; k++) {
          cy += by[k] * z[k];
          cu += bu[k] * z[k];
          cv += bv[k] * z[k];
        }

        frame.y[(j + 1) * frame.ystride + i] = CLAMP (rint (y - cy), 0, 255);
        u2[i] = CLAMP (rint (u - cu), 0, 255);
        v2[i] = CLAMP (rint (v - cv), 0, 255);
      }
      for (i = 0; i < frame.width / 2; i++) {
        frame.u[(j / 2) * frame.ustride + i] = (u1[2 * i + 0] +
            u1[2 * i + 1] + u2[2 * i + 0] + u2[2 * i + 1] + 2) >> 2;
        frame.v[(j / 2) * frame.vstride + i] = (v1[2 * i + 0] +
            v1[2 * i + 1] + v2[2 * i + 0] + v2[2 * i + 1] + 2) >> 2;
      }
    }

    g_free (u1);
    g_free (u2);
    g_free (v1);
    g_free (v2);
  }
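The detected branch above is ordinary weighted least squares: with the quadratic basis z = (1, y, u, v, y*y, y*u, y*v, u*u, u*v, v*v) it accumulates the normal equations (sum of w * z * z^T) * b = sum of w * d * z per channel and solves them via invert_matrix (). A minimal self-contained sketch of the same pattern in one dimension, with the 2x2 system inverted in closed form (all names here are illustrative, not from the file above):

#include <stdio.h>

/* Fit d ~ b0 + b1 * y by weighted least squares, the same
 * normal-equations pattern the patch-detect code uses with its
 * 10-element quadratic basis. */
static void
fit_line (const double *y, const double *d, const double *w, int n,
    double *b0, double *b1)
{
  double m00 = 0, m01 = 0, m11 = 0, v0 = 0, v1 = 0;
  int i;

  for (i = 0; i < n; i++) {
    /* basis z = (1, y[i]); accumulate sum(w * z * z^T) and sum(w * d * z) */
    m00 += w[i];
    m01 += w[i] * y[i];
    m11 += w[i] * y[i] * y[i];
    v0 += w[i] * d[i];
    v1 += w[i] * d[i] * y[i];
  }

  {
    double det = m00 * m11 - m01 * m01;

    /* b = M^-1 v, with the symmetric 2x2 inverse written out */
    *b0 = (m11 * v0 - m01 * v1) / det;
    *b1 = (m00 * v1 - m01 * v0) / det;
  }
}

int
main (void)
{
  double y[4] = { 0, 1, 2, 3 };
  double d[4] = { 1, 3, 5, 7 };  /* exactly d = 1 + 2*y */
  double w[4] = { 1, 1, 1, 2 };  /* double weight, as for the gray row */
  double b0, b1;

  fit_line (y, d, w, 4, &b0, &b1);
  printf ("d ~ %g + %g * y\n", b0, b1);   /* prints: d ~ 1 + 2 * y */
  return 0;
}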
Code Example #21
File: gstvideomark.c Project: ChinnaSuhas/ossbuild
static GstFlowReturn
gst_video_mark_yuv (GstVideoMark * videomark, GstBuffer * buffer)
{
  GstVideoFormat format;
  gint i, pw, ph, row_stride, pixel_stride, offset;
  gint width, height, req_width, req_height;
  guint8 *d, *data;
  guint64 pattern_shift;
  guint8 color;

  data = GST_BUFFER_DATA (buffer);

  format = videomark->format;
  width = videomark->width;
  height = videomark->height;

  pw = videomark->pattern_width;
  ph = videomark->pattern_height;
  row_stride = gst_video_format_get_row_stride (format, 0, width);
  pixel_stride = gst_video_format_get_pixel_stride (format, 0);
  offset = gst_video_format_get_component_offset (format, 0, width, height);

  req_width =
      (videomark->pattern_count + videomark->pattern_data_count) * pw +
      videomark->left_offset;
  req_height = videomark->bottom_offset + ph;
  if (req_width > width || req_height > height) {
    GST_ELEMENT_ERROR (videomark, STREAM, WRONG_TYPE, (NULL),
        ("videomark pattern doesn't fit video, need at least %ix%i (stream has %ix%i)",
            req_width, req_height, width, height));
    return GST_FLOW_ERROR;
  }

  /* draw the bottom left pixels */
  for (i = 0; i < videomark->pattern_count; i++) {
    d = data + offset;
    /* move to start of bottom left */
    d += row_stride * (height - ph - videomark->bottom_offset) +
        pixel_stride * videomark->left_offset;
    /* move to i-th pattern */
    d += pixel_stride * pw * i;

    if (i & 1)
      /* odd pixels must be white */
      color = 255;
    else
      color = 0;

    /* draw box of width * height */
    gst_video_mark_draw_box (videomark, d, pw, ph, row_stride, pixel_stride,
        color);
  }

  pattern_shift = G_GUINT64_CONSTANT (1) << (videomark->pattern_data_count - 1);

  /* get the data of the pattern */
  for (i = 0; i < videomark->pattern_data_count; i++) {
    d = data + offset;
    /* move to start of bottom left, adjust for offsets */
    d += row_stride * (height - ph - videomark->bottom_offset) +
        pixel_stride * videomark->left_offset;
    /* move after the fixed pattern */
    d += pixel_stride * videomark->pattern_count * pw;
    /* move to i-th pattern data */
    d += pixel_stride * pw * i;

    if (videomark->pattern_data & pattern_shift)
      color = 255;
    else
      color = 0;

    gst_video_mark_draw_box (videomark, d, pw, ph, row_stride, pixel_stride,
        color);

    pattern_shift >>= 1;
  }

  return GST_FLOW_OK;
}
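The two loops above first draw pattern_count alternating black/white sync boxes, then serialize pattern_data MSB-first, one pw x ph box per bit. A tiny sketch of that MSB-first walk (illustrative helper, not part of the element):

#include <glib.h>

/* Print the low `count` bits of `data` MSB-first; this is the order in
 * which gst_video_mark_yuv turns bits into white (1) and black (0) boxes. */
static void
print_pattern_bits (guint64 data, gint count)
{
  guint64 shift = G_GUINT64_CONSTANT (1) << (count - 1);

  while (shift != 0) {
    g_print ("%c", (data & shift) ? '1' : '0');
    shift >>= 1;
  }
  g_print ("\n");
}

int
main (void)
{
  print_pattern_bits (G_GUINT64_CONSTANT (0xD), 4);     /* prints 1101 */
  return 0;
}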
Code Example #22
static void
gst_deinterlace_method_setup_impl (GstDeinterlaceMethod * self,
    GstVideoFormat format, gint width, gint height)
{
  gint i;
  GstDeinterlaceMethodClass *klass = GST_DEINTERLACE_METHOD_GET_CLASS (self);

  self->format = format;
  self->frame_width = width;
  self->frame_height = height;

  self->deinterlace_frame = NULL;

  if (format == GST_VIDEO_FORMAT_UNKNOWN)
    return;

  for (i = 0; i < 4; i++) {
    self->width[i] = gst_video_format_get_component_width (format, i, width);
    self->height[i] = gst_video_format_get_component_height (format, i, height);
    self->offset[i] =
        gst_video_format_get_component_offset (format, i, width, height);
    self->row_stride[i] = gst_video_format_get_row_stride (format, i, width);
    self->pixel_stride[i] = gst_video_format_get_pixel_stride (format, i);
  }

  switch (format) {
    case GST_VIDEO_FORMAT_YUY2:
      self->deinterlace_frame = klass->deinterlace_frame_yuy2;
      break;
    case GST_VIDEO_FORMAT_YVYU:
      self->deinterlace_frame = klass->deinterlace_frame_yvyu;
      break;
    case GST_VIDEO_FORMAT_UYVY:
      self->deinterlace_frame = klass->deinterlace_frame_uyvy;
      break;
    case GST_VIDEO_FORMAT_I420:
      self->deinterlace_frame = klass->deinterlace_frame_i420;
      break;
    case GST_VIDEO_FORMAT_YV12:
      self->deinterlace_frame = klass->deinterlace_frame_yv12;
      break;
    case GST_VIDEO_FORMAT_Y444:
      self->deinterlace_frame = klass->deinterlace_frame_y444;
      break;
    case GST_VIDEO_FORMAT_Y42B:
      self->deinterlace_frame = klass->deinterlace_frame_y42b;
      break;
    case GST_VIDEO_FORMAT_Y41B:
      self->deinterlace_frame = klass->deinterlace_frame_y41b;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      self->deinterlace_frame = klass->deinterlace_frame_ayuv;
      break;
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_xRGB:
      self->deinterlace_frame = klass->deinterlace_frame_argb;
      break;
    case GST_VIDEO_FORMAT_ABGR:
    case GST_VIDEO_FORMAT_xBGR:
      self->deinterlace_frame = klass->deinterlace_frame_abgr;
      break;
    case GST_VIDEO_FORMAT_RGBA:
    case GST_VIDEO_FORMAT_RGBx:
      self->deinterlace_frame = klass->deinterlace_frame_rgba;
      break;
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_BGRx:
      self->deinterlace_frame = klass->deinterlace_frame_bgra;
      break;
    case GST_VIDEO_FORMAT_RGB:
      self->deinterlace_frame = klass->deinterlace_frame_rgb;
      break;
    case GST_VIDEO_FORMAT_BGR:
      self->deinterlace_frame = klass->deinterlace_frame_bgr;
      break;
    default:
      self->deinterlace_frame = NULL;
      break;
  }
}
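Caching the per-component geometry once per setup means the actual deinterlacing code can address any pixel of any plane without format-specific logic. A sketch of the usual addressing idiom under that assumption (hypothetical helper, taking the arrays the setup function fills in):

#include <glib.h>

/* Address component `c` of the pixel at (x, y), given the offset,
 * row-stride and pixel-stride arrays cached by the setup function above. */
static guint8 *
component_pixel (guint8 * data, const gint * offset,
    const gint * row_stride, const gint * pixel_stride,
    gint c, gint x, gint y)
{
  return data + offset[c] + y * row_stride[c] + x * pixel_stride[c];
}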
Code Example #23
static gboolean
gst_jpegenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstJpegEnc *enc = GST_JPEGENC (gst_pad_get_parent (pad));
  GstVideoFormat format;
  gint width, height;
  gint fps_num, fps_den;
  gint par_num, par_den;
  gint i;
  GstCaps *othercaps;
  gboolean ret;

  /* get info from caps */
  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto refuse_caps;
  /* optional; pass along if present */
  fps_num = fps_den = -1;
  par_num = par_den = -1;
  gst_video_parse_caps_framerate (caps, &fps_num, &fps_den);
  gst_video_parse_caps_pixel_aspect_ratio (caps, &par_num, &par_den);

  if (width == enc->width && height == enc->height && enc->format == format
      && fps_num == enc->fps_num && fps_den == enc->fps_den
      && par_num == enc->par_num && par_den == enc->par_den)
    return TRUE;

  /* store input description */
  enc->format = format;
  enc->width = width;
  enc->height = height;
  enc->fps_num = fps_num;
  enc->fps_den = fps_den;
  enc->par_num = par_num;
  enc->par_den = par_den;

  /* prepare a cached image description  */
  enc->channels = 3 + (gst_video_format_has_alpha (format) ? 1 : 0);
  /* ... but any alpha is disregarded in encoding */
  if (gst_video_format_is_gray (format))
    enc->channels = 1;
  else
    enc->channels = 3;
  enc->h_max_samp = 0;
  enc->v_max_samp = 0;
  for (i = 0; i < enc->channels; ++i) {
    enc->cwidth[i] = gst_video_format_get_component_width (format, i, width);
    enc->cheight[i] = gst_video_format_get_component_height (format, i, height);
    enc->offset[i] = gst_video_format_get_component_offset (format, i, width,
        height);
    enc->stride[i] = gst_video_format_get_row_stride (format, i, width);
    enc->inc[i] = gst_video_format_get_pixel_stride (format, i);
    enc->h_samp[i] = GST_ROUND_UP_4 (width) / enc->cwidth[i];
    enc->h_max_samp = MAX (enc->h_max_samp, enc->h_samp[i]);
    enc->v_samp[i] = GST_ROUND_UP_4 (height) / enc->cheight[i];
    enc->v_max_samp = MAX (enc->v_max_samp, enc->v_samp[i]);
  }
  /* samp should only be 1, 2 or 4 */
  g_assert (enc->h_max_samp <= 4);
  g_assert (enc->v_max_samp <= 4);
  /* now invert */
  /* maximum is invariant, as one of the components should have samp 1 */
  for (i = 0; i < enc->channels; ++i) {
    enc->h_samp[i] = enc->h_max_samp / enc->h_samp[i];
    enc->v_samp[i] = enc->v_max_samp / enc->v_samp[i];
  }
  enc->planar = (enc->inc[0] == 1 && enc->inc[1] == 1 && enc->inc[2] == 1);

  othercaps = gst_caps_copy (gst_pad_get_pad_template_caps (enc->srcpad));
  gst_caps_set_simple (othercaps,
      "width", G_TYPE_INT, enc->width, "height", G_TYPE_INT, enc->height, NULL);
  if (enc->fps_den > 0)
    gst_caps_set_simple (othercaps,
        "framerate", GST_TYPE_FRACTION, enc->fps_num, enc->fps_den, NULL);
  if (enc->par_den > 0)
    gst_caps_set_simple (othercaps,
        "pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_num, enc->par_den,
        NULL);

  ret = gst_pad_set_caps (enc->srcpad, othercaps);
  gst_caps_unref (othercaps);

  if (ret)
    gst_jpegenc_resync (enc);

  gst_object_unref (enc);

  return ret;

  /* ERRORS */
refuse_caps:
  {
    GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
    gst_object_unref (enc);
    return FALSE;
  }
}
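As a worked example, for I420 at 320x240 the component widths are {320, 160, 160}, so h_samp starts as {1, 2, 2} and h_max_samp is 2; the inversion loop then turns h_samp into {2, 1, 1} (and likewise vertically), which are exactly the JPEG sampling factors for 4:2:0: luma sampled 2x2, chroma 1x1.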
Code Example #24
File: gstsmptealpha.c Project: ChinnaSuhas/ossbuild
static void
gst_smpte_alpha_process_i420_ayuv (GstSMPTEAlpha * smpte, const guint8 * in,
    guint8 * out, GstMask * mask, gint width, gint height, gint border,
    gint pos)
{
  const guint8 *srcY;
  const guint8 *srcU;
  const guint8 *srcV;
  gint i, j;
  gint src_wrap, src_uv_wrap;
  gint y_stride, uv_stride;
  gboolean odd_width;
  const guint32 *maskp;
  gint value;
  gint min, max;

  if (border == 0)
    border++;

  min = pos - border;
  max = pos;
  GST_DEBUG_OBJECT (smpte, "pos %d, min %d, max %d, border %d", pos, min, max,
      border);

  maskp = mask->data;

  y_stride = gst_video_format_get_row_stride (smpte->in_format, 0, width);
  uv_stride = gst_video_format_get_row_stride (smpte->in_format, 1, width);

  src_wrap = y_stride - width;
  src_uv_wrap = uv_stride - (width / 2);

  srcY = in;
  srcU = in + gst_video_format_get_component_offset (smpte->in_format,
      1, width, height);
  srcV = in + gst_video_format_get_component_offset (smpte->in_format,
      2, width, height);

  odd_width = (width % 2 != 0);

  for (i = 0; i < height; i++) {
    for (j = 0; j < width / 2; j++) {
      value = *maskp++;
      *out++ = (0xff * ((CLAMP (value, min, max) - min) << 8) / border) >> 8;
      *out++ = *srcY++;
      *out++ = *srcU;
      *out++ = *srcV;
      value = *maskp++;
      *out++ = (0xff * ((CLAMP (value, min, max) - min) << 8) / border) >> 8;
      *out++ = *srcY++;
      *out++ = *srcU++;
      *out++ = *srcV++;
    }
    /* Might have one odd column left to do */
    if (odd_width) {
      value = *maskp++;
      *out++ = (0xff * ((CLAMP (value, min, max) - min) << 8) / border) >> 8;
      *out++ = *srcY++;
      *out++ = *srcU;
      *out++ = *srcV;
    }
    if (i % 2 == 0) {
      srcU -= width / 2;
      srcV -= width / 2;
    } else {
      srcU += src_uv_wrap;
      srcV += src_uv_wrap;
    }
    srcY += src_wrap;
  }
}
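The alpha written per pixel is a linear ramp over the mask values: everything at or below pos - border becomes fully transparent, everything at or above pos fully opaque. Factored out as a sketch (the helper name is illustrative):

#include <glib.h>

/* Map a mask value to alpha: 0 at or below (pos - border), 0xff at or
 * above pos, linear in between. Same arithmetic as the loop above; the
 * << 8 / >> 8 pair keeps the division in integer arithmetic. */
static guint8
mask_to_alpha (gint value, gint pos, gint border)
{
  gint min = pos - border;

  return (0xff * ((CLAMP (value, min, pos) - min) << 8) / border) >> 8;
}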
Code Example #25
File: gstcompare.c Project: kanongil/gst-plugins-bad
static gdouble
gst_compare_ssim (GstCompare * comp, GstBuffer * buf1, GstBuffer * buf2)
{
  GstCaps *caps;
  GstVideoFormat format, f;
  gint width, height, w, h, i, comps;
  gdouble cssim[4] = { 0.0, }, ssim, c[4] = { 1.0, 0.0, 0.0, 0.0 };
  guint8 *data1, *data2;

  caps = GST_BUFFER_CAPS (buf1);
  if (!caps)
    goto invalid_input;

  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto invalid_input;

  caps = GST_BUFFER_CAPS (buf2);
  if (!caps)
    goto invalid_input;

  if (!gst_video_format_parse_caps (caps, &f, &w, &h))
    goto invalid_input;

  if (f != format || w != width || h != height)
    return comp->threshold + 1;

  comps = gst_video_format_is_gray (format) ? 1 : 3;
  if (gst_video_format_has_alpha (format))
    comps += 1;

  /* note that some are reported both yuv and gray */
  for (i = 0; i < comps; ++i)
    c[i] = 1.0;
  /* increase luma weight if yuv */
  if (gst_video_format_is_yuv (format) && (comps > 1))
    c[0] = comps - 1;
  for (i = 0; i < comps; ++i)
    c[i] /= (gst_video_format_is_yuv (format) && (comps > 1)) ?
        2 * (comps - 1) : comps;

  data1 = GST_BUFFER_DATA (buf1);
  data2 = GST_BUFFER_DATA (buf2);
  for (i = 0; i < comps; i++) {
    gint offset, cw, ch, step, stride;

    /* only support most common formats */
    if (gst_video_format_get_component_depth (format, i) != 8)
      goto unsupported_input;
    offset = gst_video_format_get_component_offset (format, i, width, height);
    cw = gst_video_format_get_component_width (format, i, width);
    ch = gst_video_format_get_component_height (format, i, height);
    step = gst_video_format_get_pixel_stride (format, i);
    stride = gst_video_format_get_row_stride (format, i, width);

    GST_LOG_OBJECT (comp, "component %d", i);
    cssim[i] = gst_compare_ssim_component (comp, data1 + offset, data2 + offset,
        cw, ch, step, stride);
    GST_LOG_OBJECT (comp, "ssim[%d] = %f", i, cssim[i]);
  }

#ifndef GST_DISABLE_GST_DEBUG
  for (i = 0; i < 4; i++) {
    GST_DEBUG_OBJECT (comp, "ssim[%d] = %f, c[%d] = %f", i, cssim[i], i, c[i]);
  }
#endif

  ssim = cssim[0] * c[0] + cssim[1] * c[1] + cssim[2] * c[2] + cssim[3] * c[3];

  return ssim;

  /* ERRORS */
invalid_input:
  {
    GST_ERROR_OBJECT (comp, "ssim method needs raw video input");
    return 0;
  }
unsupported_input:
  {
    GST_ERROR_OBJECT (comp, "raw video format not supported %" GST_PTR_FORMAT,
        caps);
    return 0;
  }
}
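For I420, for instance: comps is 3, c starts as {1, 1, 1}, the luma weight is raised to comps - 1 = 2, and every entry is divided by 2 * (comps - 1) = 4, giving final weights {0.5, 0.25, 0.25}; luma contributes half of the combined SSIM and each chroma plane a quarter, with the weights summing to 1.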
Code Example #26
File: gstvdpvideoyuv.c Project: zsx/ossbuild
GstFlowReturn
gst_vdp_video_yuv_transform (GstBaseTransform * trans, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstVdpVideoYUV *video_yuv = GST_VDP_VIDEO_YUV (trans);
  GstVdpDevice *device;
  VdpVideoSurface surface;

  device = GST_VDP_VIDEO_BUFFER (inbuf)->device;
  surface = GST_VDP_VIDEO_BUFFER (inbuf)->surface;

  switch (video_yuv->format) {
    case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
    {
      VdpStatus status;
      guint8 *data[3];
      guint32 stride[3];

      data[0] = GST_BUFFER_DATA (outbuf) +
          gst_video_format_get_component_offset (GST_VIDEO_FORMAT_YV12,
          0, video_yuv->width, video_yuv->height);
      data[1] = GST_BUFFER_DATA (outbuf) +
          gst_video_format_get_component_offset (GST_VIDEO_FORMAT_YV12,
          2, video_yuv->width, video_yuv->height);
      data[2] = GST_BUFFER_DATA (outbuf) +
          gst_video_format_get_component_offset (GST_VIDEO_FORMAT_YV12,
          1, video_yuv->width, video_yuv->height);

      stride[0] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YV12,
          0, video_yuv->width);
      stride[1] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YV12,
          2, video_yuv->width);
      stride[2] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YV12,
          1, video_yuv->width);

      GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
      status =
          device->vdp_video_surface_get_bits_ycbcr (surface,
          VDP_YCBCR_FORMAT_YV12, (void *) data, stride);
      GST_LOG_OBJECT (video_yuv,
          "Got status %d from vdp_video_surface_get_bits_ycbcr", status);
      if (G_UNLIKELY (status != VDP_STATUS_OK)) {
        GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
            ("Couldn't get data from vdpau"),
            ("Error returned from vdpau was: %s",
                device->vdp_get_error_string (status)));
        return GST_FLOW_ERROR;
      }
      break;
    }
    case GST_MAKE_FOURCC ('I', '4', '2', '0'):
    {
      VdpStatus status;
      guint8 *data[3];
      guint32 stride[3];

      data[0] = GST_BUFFER_DATA (outbuf) +
          gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420,
          0, video_yuv->width, video_yuv->height);
      data[1] = GST_BUFFER_DATA (outbuf) +
          gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420,
          2, video_yuv->width, video_yuv->height);
      data[2] = GST_BUFFER_DATA (outbuf) +
          gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420,
          1, video_yuv->width, video_yuv->height);

      stride[0] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420,
          0, video_yuv->width);
      stride[1] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420,
          2, video_yuv->width);
      stride[2] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420,
          1, video_yuv->width);

      GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
      status =
          device->vdp_video_surface_get_bits_ycbcr (surface,
          VDP_YCBCR_FORMAT_YV12, (void *) data, stride);
      GST_LOG_OBJECT (video_yuv,
          "Got status %d from vdp_video_surface_get_bits_ycbcr", status);
      if (G_UNLIKELY (status != VDP_STATUS_OK)) {
        GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
            ("Couldn't get data from vdpau"),
            ("Error returned from vdpau was: %s",
                device->vdp_get_error_string (status)));
        return GST_FLOW_ERROR;
      }
      break;
    }
    case GST_MAKE_FOURCC ('N', 'V', '1', '2'):
    {
      VdpStatus status;
      guint8 *data[2];
      guint32 stride[2];

      data[0] = GST_BUFFER_DATA (outbuf);
      data[1] = GST_BUFFER_DATA (outbuf) + video_yuv->width * video_yuv->height;

      stride[0] = video_yuv->width;
      stride[1] = video_yuv->width;

      GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
      status =
          device->vdp_video_surface_get_bits_ycbcr (surface,
          VDP_YCBCR_FORMAT_NV12, (void *) data, stride);
      GST_LOG_OBJECT (video_yuv,
          "Got status %d from vdp_video_surface_get_bits_ycbcr", status);
      if (G_UNLIKELY (status != VDP_STATUS_OK)) {
        GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
            ("Couldn't get data from vdpau"),
            ("Error returned from vdpau was: %s",
                device->vdp_get_error_string (status)));
        return GST_FLOW_ERROR;
      }
      break;
    }
    case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
    {
      VdpStatus status;
      guint8 *data[1];
      guint32 stride[1];

      data[0] = GST_BUFFER_DATA (outbuf);

      stride[0] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_UYVY,
          0, video_yuv->width);

      GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
      status =
          device->vdp_video_surface_get_bits_ycbcr (surface,
          VDP_YCBCR_FORMAT_UYVY, (void *) data, stride);
      GST_LOG_OBJECT (video_yuv,
          "Got status %d from vdp_video_surface_get_bits_ycbcr", status);
      if (G_UNLIKELY (status != VDP_STATUS_OK)) {
        GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
            ("Couldn't get data from vdpau"),
            ("Error returned from vdpau was: %s",
                device->vdp_get_error_string (status)));
        return GST_FLOW_ERROR;
      }
      break;
    }
    case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
    {
      VdpStatus status;
      guint8 *data[1];
      guint32 stride[1];

      data[0] = GST_BUFFER_DATA (outbuf);

      stride[0] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YUY2,
          0, video_yuv->width);

      GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
      status =
          device->vdp_video_surface_get_bits_ycbcr (surface,
          VDP_YCBCR_FORMAT_YUYV, (void *) data, stride);
      GST_LOG_OBJECT (video_yuv,
          "Got status %d from vdp_video_surface_get_bits_ycbcr", status);
      if (G_UNLIKELY (status != VDP_STATUS_OK)) {
        GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
            ("Couldn't get data from vdpau"),
            ("Error returned from vdpau was: %s",
                device->vdp_get_error_string (status)));
        return GST_FLOW_ERROR;
      }
      break;
    }
    default:
      break;
  }

  gst_buffer_copy_metadata (outbuf, inbuf,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS);

  GST_LOG_OBJECT (video_yuv, "Pushing buffer with ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)));

  return GST_FLOW_OK;
}
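Note the plane order: VDP_YCBCR_FORMAT_YV12 expects planes as Y, V, U, so both the YV12 and the I420 branches pass GStreamer component 2 (V) as data[1] and component 1 (U) as data[2], and the component-offset calls hide where each plane actually lives in the two layouts. The NV12 branch computes offsets and strides by hand, which assumes tightly packed planes, while UYVY and YUY2 are single-plane and only need a row stride.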
Code Example #27
static void
gst_video_flip_packed_simple (GstVideoFlip * videoflip, guint8 * dest,
    const guint8 * src)
{
  gint x, y, z;
  guint8 const *s = src;
  guint8 *d = dest;
  GstVideoFormat format = videoflip->format;
  gint sw = videoflip->from_width;
  gint sh = videoflip->from_height;
  gint dw = videoflip->to_width;
  gint dh = videoflip->to_height;
  gint src_stride, dest_stride;
  gint bpp;

  src_stride = gst_video_format_get_row_stride (format, 0, sw);
  dest_stride = gst_video_format_get_row_stride (format, 0, dw);
  /* This is only true for non-subsampled formats! */
  bpp = gst_video_format_get_pixel_stride (format, 0);

  switch (videoflip->method) {
    case GST_VIDEO_FLIP_METHOD_90R:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] =
                s[(sh - 1 - x) * src_stride + y * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_90L:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] =
                s[x * src_stride + (sw - 1 - y) * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_180:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] =
                s[(sh - 1 - y) * src_stride + (sw - 1 - x) * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_HORIZ:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] =
                s[y * src_stride + (sw - 1 - x) * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_VERT:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] =
                s[(sh - 1 - y) * src_stride + x * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_TRANS:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] = s[x * src_stride + y * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_OTHER:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x++) {
          for (z = 0; z < bpp; z++) {
            d[y * dest_stride + x * bpp + z] =
                s[(sh - 1 - x) * src_stride + (sw - 1 - y) * bpp + z];
          }
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_IDENTITY:
      g_assert_not_reached ();
      break;
    default:
      g_assert_not_reached ();
      break;
  }
}
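Each method is a pure index mapping from destination to source: 90R reads destination (x, y) from source (y, sh - 1 - x), i.e. the top destination row is the left-most source column walked bottom to top; 180 reads (sw - 1 - x, sh - 1 - y); TRANS is a plain transpose. Because bpp bytes are copied per pixel, the approach only works when every pixel is self-contained, which is what the "only true for non-subsampled formats" comment above is warning about.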
Code Example #28
static void
gst_video_balance_packed_rgb (GstVideoBalance * videobalance, guint8 * data)
{
  gint i, j, height;
  gint width, row_stride, row_wrap;
  gint pixel_stride;
  gint offsets[3];
  gint r, g, b;
  gint y, u, v;
  gint u_tmp, v_tmp;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  offsets[0] = gst_video_format_get_component_offset (videobalance->format, 0,
      videobalance->width, videobalance->height);
  offsets[1] = gst_video_format_get_component_offset (videobalance->format, 1,
      videobalance->width, videobalance->height);
  offsets[2] = gst_video_format_get_component_offset (videobalance->format, 2,
      videobalance->width, videobalance->height);

  width =
      gst_video_format_get_component_width (videobalance->format, 0,
      videobalance->width);
  height =
      gst_video_format_get_component_height (videobalance->format, 0,
      videobalance->height);
  row_stride =
      gst_video_format_get_row_stride (videobalance->format, 0,
      videobalance->width);
  pixel_stride = gst_video_format_get_pixel_stride (videobalance->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];

      y = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 0, r, g, b);
      u_tmp = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 1, r, g, b);
      v_tmp = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 2, r, g, b);

      y = CLAMP (y, 0, 255);
      u_tmp = CLAMP (u_tmp, 0, 255);
      v_tmp = CLAMP (v_tmp, 0, 255);

      y = tabley[y];
      u = tableu[u_tmp][v_tmp];
      v = tablev[u_tmp][v_tmp];

      r = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 0, y, u, v);
      g = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 1, y, u, v);
      b = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 2, y, u, v);

      data[offsets[0]] = CLAMP (r, 0, 255);
      data[offsets[1]] = CLAMP (g, 0, 255);
      data[offsets[2]] = CLAMP (b, 0, 255);
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
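row_wrap is the per-row padding, row_stride - pixel_stride * width bytes, skipped after each row so the loop never assumes rows are tightly packed; for RGB at width 322, for instance, the row stride is GST_ROUND_UP_4 (322 * 3) = 968 and row_wrap is 2.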
Code Example #29
File: gstvideodetect.c Project: spunktsch/svtplayer
static void
gst_video_detect_yuv (GstVideoDetect * videodetect, GstBuffer * buffer)
{
  GstVideoFormat format;
  gdouble brightness;
  gint i, pw, ph, row_stride, pixel_stride, offset;
  gint width, height, req_width, req_height;
  guint8 *d, *data;
  guint64 pattern_data;

  data = GST_BUFFER_DATA (buffer);

  format = videodetect->format;
  width = videodetect->width;
  height = videodetect->height;

  pw = videodetect->pattern_width;
  ph = videodetect->pattern_height;
  row_stride = gst_video_format_get_row_stride (format, 0, width);
  pixel_stride = gst_video_format_get_pixel_stride (format, 0);
  offset = gst_video_format_get_component_offset (format, 0, width, height);

  req_width =
      (videodetect->pattern_count + videodetect->pattern_data_count) * pw +
      videodetect->left_offset;
  req_height = videodetect->bottom_offset + ph;
  if (req_width > width || req_height > height) {
    goto no_pattern;
  }

  /* analyse the bottom left pixels */
  for (i = 0; i < videodetect->pattern_count; i++) {
    d = data + offset;
    /* move to start of bottom left, adjust for offsets */
    d += row_stride * (height - ph - videodetect->bottom_offset) +
        pixel_stride * videodetect->left_offset;
    /* move to i-th pattern */
    d += pixel_stride * pw * i;

    /* calc brightness of width * height box */
    brightness = gst_video_detect_calc_brightness (videodetect, d, pw, ph,
        row_stride, pixel_stride);

    GST_DEBUG_OBJECT (videodetect, "brightness %f", brightness);

    if (i & 1) {
      /* odd pixels must be white, all pixels darker than the center +
       * sensitivity are considered wrong. */
      if (brightness <
          (videodetect->pattern_center + videodetect->pattern_sensitivity))
        goto no_pattern;
    } else {
      /* even pixels must be black, pixels lighter than the center - sensitivity
       * are considered wrong. */
      if (brightness >
          (videodetect->pattern_center - videodetect->pattern_sensitivity))
        goto no_pattern;
    }
  }
  GST_DEBUG_OBJECT (videodetect, "found pattern");

  pattern_data = 0;

  /* get the data of the pattern */
  for (i = 0; i < videodetect->pattern_data_count; i++) {
    d = data + offset;
    /* move to start of bottom left, adjust for offsets */
    d += row_stride * (height - ph - videodetect->bottom_offset) +
        pixel_stride * videodetect->left_offset;
    /* move after the fixed pattern */
    d += pixel_stride * (videodetect->pattern_count * pw);
    /* move to i-th pattern data */
    d += pixel_stride * pw * i;

    /* calc brightness of width * height box */
    brightness = gst_video_detect_calc_brightness (videodetect, d, pw, ph,
        row_stride, pixel_stride);
    /* update pattern, we just use the center to decide between black and white. */
    pattern_data <<= 1;
    if (brightness > videodetect->pattern_center)
      pattern_data |= 1;
  }

  GST_DEBUG_OBJECT (videodetect, "have data %" G_GUINT64_FORMAT, pattern_data);

  videodetect->in_pattern = TRUE;
  gst_video_detect_post_message (videodetect, buffer, pattern_data);

  return;

no_pattern:
  {
    GST_DEBUG_OBJECT (videodetect, "no pattern found");
    if (videodetect->in_pattern) {
      videodetect->in_pattern = FALSE;
      gst_video_detect_post_message (videodetect, buffer, 0);
    }
    return;
  }
}
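The sync boxes are validated with a dead zone around pattern_center: an "on" box must measure brighter than center + sensitivity and an "off" box darker than center - sensitivity, so borderline brightness fails the sync check instead of yielding garbage data bits. The data boxes, read only after sync, are thresholded against the center alone, and the in_pattern flag ensures that losing the pattern is posted only once.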
Code Example #30
static void
gst_video_flip_y422 (GstVideoFlip * videoflip, guint8 * dest,
    const guint8 * src)
{
  gint x, y;
  guint8 const *s = src;
  guint8 *d = dest;
  GstVideoFormat format = videoflip->format;
  gint sw = videoflip->from_width;
  gint sh = videoflip->from_height;
  gint dw = videoflip->to_width;
  gint dh = videoflip->to_height;
  gint src_stride, dest_stride;
  gint bpp;
  gint y_offset;
  gint u_offset;
  gint v_offset;
  gint y_stride;

  src_stride = gst_video_format_get_row_stride (format, 0, sw);
  dest_stride = gst_video_format_get_row_stride (format, 0, dw);

  y_offset = gst_video_format_get_component_offset (format, 0, sw, sh);
  u_offset = gst_video_format_get_component_offset (format, 1, sw, sh);
  v_offset = gst_video_format_get_component_offset (format, 2, sw, sh);
  y_stride = gst_video_format_get_pixel_stride (format, 0);
  bpp = y_stride;

  switch (videoflip->method) {
    case GST_VIDEO_FLIP_METHOD_90R:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_y = (y & ~1);

          u = s[(sh - 1 - x) * src_stride + even_y * bpp + u_offset];
          if (x + 1 < dw)
            u = (s[(sh - 1 - (x + 1)) * src_stride + even_y * bpp + u_offset]
                + u) >> 1;
          v = s[(sh - 1 - x) * src_stride + even_y * bpp + v_offset];
          if (x + 1 < dw)
            v = (s[(sh - 1 - (x + 1)) * src_stride + even_y * bpp + v_offset]
                + v) >> 1;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[(sh - 1 - x) * src_stride + y * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[(sh - 1 - (x + 1)) * src_stride + y * bpp + y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_90L:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_y = ((sw - 1 - y) & ~1);

          u = s[x * src_stride + even_y * bpp + u_offset];
          if (x + 1 < dw)
            u = (s[(x + 1) * src_stride + even_y * bpp + u_offset] + u) >> 1;
          v = s[x * src_stride + even_y * bpp + v_offset];
          if (x + 1 < dw)
            v = (s[(x + 1) * src_stride + even_y * bpp + v_offset] + v) >> 1;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[x * src_stride + (sw - 1 - y) * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[(x + 1) * src_stride + (sw - 1 - y) * bpp + y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_180:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_x = ((sw - 1 - x) & ~1);

          u = (s[(sh - 1 - y) * src_stride + even_x * bpp + u_offset] +
              s[(sh - 1 - y) * src_stride + even_x * bpp + u_offset]) / 2;
          v = (s[(sh - 1 - y) * src_stride + even_x * bpp + v_offset] +
              s[(sh - 1 - y) * src_stride + even_x * bpp + v_offset]) / 2;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[(sh - 1 - y) * src_stride + (sw - 1 - x) * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[(sh - 1 - y) * src_stride + (sw - 1 - (x + 1)) * bpp +
                y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_HORIZ:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_x = ((sw - 1 - x) & ~1);

          u = (s[y * src_stride + even_x * bpp + u_offset] +
              s[y * src_stride + even_x * bpp + u_offset]) / 2;
          v = (s[y * src_stride + even_x * bpp + v_offset] +
              s[y * src_stride + even_x * bpp + v_offset]) / 2;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[y * src_stride + (sw - 1 - x) * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[y * src_stride + (sw - 1 - (x + 1)) * bpp + y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_VERT:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_x = (x & ~1);

          u = (s[(sh - 1 - y) * src_stride + even_x * bpp + u_offset] +
              s[(sh - 1 - y) * src_stride + even_x * bpp + u_offset]) / 2;
          v = (s[(sh - 1 - y) * src_stride + even_x * bpp + v_offset] +
              s[(sh - 1 - y) * src_stride + even_x * bpp + v_offset]) / 2;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[(sh - 1 - y) * src_stride + x * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[(sh - 1 - y) * src_stride + (x + 1) * bpp + y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_TRANS:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_y = (y & ~1);

          u = s[x * src_stride + even_y * bpp + u_offset];
          if (x + 1 < dw)
            u = (s[(x + 1) * src_stride + even_y * bpp + u_offset] + u) >> 1;
          v = s[x * src_stride + even_y * bpp + v_offset];
          if (x + 1 < dw)
            v = (s[(x + 1) * src_stride + even_y * bpp + v_offset] + v) >> 1;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[x * src_stride + y * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[(x + 1) * src_stride + y * bpp + y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_OTHER:
      for (y = 0; y < dh; y++) {
        for (x = 0; x < dw; x += 2) {
          guint8 u;
          guint8 v;
          /* u/v must be calculated using the offset of the even column */
          gint even_y = ((sw - 1 - y) & ~1);

          u = s[(sh - 1 - x) * src_stride + even_y * bpp + u_offset];
          if (x + 1 < dw)
            u = (s[(sh - 1 - (x + 1)) * src_stride + even_y * bpp + u_offset]
                + u) >> 1;
          v = s[(sh - 1 - x) * src_stride + even_y * bpp + v_offset];
          if (x + 1 < dw)
            v = (s[(sh - 1 - (x + 1)) * src_stride + even_y * bpp + v_offset]
                + v) >> 1;

          d[y * dest_stride + x * bpp + u_offset] = u;
          d[y * dest_stride + x * bpp + v_offset] = v;
          d[y * dest_stride + x * bpp + y_offset] =
              s[(sh - 1 - x) * src_stride + (sw - 1 - y) * bpp + y_offset];
          if (x + 1 < dw)
            d[y * dest_stride + (x + 1) * bpp + y_offset] =
                s[(sh - 1 - (x + 1)) * src_stride + (sw - 1 - y) * bpp +
                y_offset];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_IDENTITY:
      g_assert_not_reached ();
      break;
    default:
      g_assert_not_reached ();
      break;
  }
}
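In packed 4:2:2 a U and a V sample are shared by each even/odd pixel pair, which is why every chroma read masks its column with & ~1 to land on the pair's even column; where a destination pair draws its luma from two different source positions, the two source chroma samples are averaged with (a + b) >> 1 before being written once per pair.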