Code example #1
File: gstvideoscale.c  Project: ChinnaSuhas/ossbuild
static void
gst_video_scale_setup_vs_image (VSImage * image, GstVideoFormat format,
    gint component, gint width, gint height, gint b_w, gint b_h, uint8_t * data)
{
  image->real_width =
      gst_video_format_get_component_width (format, component, width);
  image->real_height =
      gst_video_format_get_component_height (format, component, height);
  image->width =
      gst_video_format_get_component_width (format, component, MAX (1,
          width - b_w));
  image->height =
      gst_video_format_get_component_height (format, component, MAX (1,
          height - b_h));
  image->stride = gst_video_format_get_row_stride (format, component, width);

  image->border_top = (image->real_height - image->height) / 2;
  image->border_bottom = image->real_height - image->height - image->border_top;

  if (format == GST_VIDEO_FORMAT_YUY2 || format == GST_VIDEO_FORMAT_YVYU
      || format == GST_VIDEO_FORMAT_UYVY) {
    g_assert (component == 0);

    image->border_left = (image->real_width - image->width) / 2;

    if (image->border_left % 2 == 1)
      image->border_left--;
    image->border_right = image->real_width - image->width - image->border_left;
  } else {
    image->border_left = (image->real_width - image->width) / 2;
    image->border_right = image->real_width - image->width - image->border_left;
  }

  if (format == GST_VIDEO_FORMAT_I420
      || format == GST_VIDEO_FORMAT_YV12
      || format == GST_VIDEO_FORMAT_Y444
      || format == GST_VIDEO_FORMAT_Y42B || format == GST_VIDEO_FORMAT_Y41B) {
    image->real_pixels = data + gst_video_format_get_component_offset (format,
        component, width, height);
  } else {
    g_assert (component == 0);
    image->real_pixels = data;
  }

  image->pixels =
      image->real_pixels + image->border_top * image->stride +
      image->border_left * gst_video_format_get_pixel_stride (format,
      component);
}
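The helper above derives every plane's geometry (size, stride, borders, starting pixel) from GStreamer 0.10's video format API. As a minimal, self-contained sketch of those same per-component queries (a hypothetical standalone program, not part of gstvideoscale.c; assumes gstreamer-0.10 and the gst-plugins-base video library):

#include <gst/gst.h>
#include <gst/video/video.h>

int
main (int argc, char **argv)
{
  GstVideoFormat format = GST_VIDEO_FORMAT_I420;
  gint width = 640, height = 480, c;

  gst_init (&argc, &argv);

  /* I420 is planar: component 0 is Y, 1 is U, 2 is V */
  for (c = 0; c < 3; c++) {
    g_print ("component %d: %dx%d, row stride %d, offset %d\n", c,
        gst_video_format_get_component_width (format, c, width),
        gst_video_format_get_component_height (format, c, height),
        gst_video_format_get_row_stride (format, c, width),
        gst_video_format_get_component_offset (format, c, width, height));
  }
  return 0;
}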
Code example #2
File: gstgamma.c  Project: luisbg/gst-plugins-good
static void
gst_gamma_packed_yuv_ip (GstGamma * gamma, guint8 * data)
{
  gint i, j, height;
  gint width, row_stride, row_wrap;
  gint pixel_stride;
  const guint8 *table = gamma->gamma_table;

  data = data + gst_video_format_get_component_offset (gamma->format, 0,
      gamma->width, gamma->height);

  width = gst_video_format_get_component_width (gamma->format, 0, gamma->width);
  height = gst_video_format_get_component_height (gamma->format, 0,
      gamma->height);
  row_stride = gst_video_format_get_row_stride (gamma->format, 0, gamma->width);
  pixel_stride = gst_video_format_get_pixel_stride (gamma->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      *data = table[*data];
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
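The in-place loop above simply walks the Y samples of a packed YUV frame (pixel_stride skips the interleaved chroma bytes, row_wrap skips the stride padding) and maps each through a precomputed 256-entry table. A hedged sketch of how such a gamma table could be built (build_gamma_table is hypothetical; gstgamma.c computes its own table elsewhere, possibly with the inverse exponent depending on convention):

#include <glib.h>
#include <math.h>

static void
build_gamma_table (guint8 table[256], gdouble gamma)
{
  gint i;

  /* table[i] = 255 * (i/255)^gamma, rounded and clamped */
  for (i = 0; i < 256; i++)
    table[i] = (guint8) CLAMP (floor (255.0 * pow (i / 255.0, gamma) + 0.5),
        0, 255);
}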
Code example #3
static void
gst_video_balance_planar_yuv (GstVideoBalance * videobalance, guint8 * data)
{
  gint x, y;
  guint8 *ydata;
  guint8 *udata, *vdata;
  gint ystride, ustride, vstride;
  GstVideoFormat format;
  gint width, height;
  gint width2, height2;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  format = videobalance->format;
  width = videobalance->width;
  height = videobalance->height;

  ydata =
      data + gst_video_format_get_component_offset (format, 0, width, height);
  ystride = gst_video_format_get_row_stride (format, 0, width);

  for (y = 0; y < height; y++) {
    guint8 *yptr;

    yptr = ydata + y * ystride;
    for (x = 0; x < width; x++) {
      *yptr = tabley[*yptr];
      yptr++;
    }
  }

  width2 = gst_video_format_get_component_width (format, 1, width);
  height2 = gst_video_format_get_component_height (format, 1, height);

  udata =
      data + gst_video_format_get_component_offset (format, 1, width, height);
  vdata =
      data + gst_video_format_get_component_offset (format, 2, width, height);
  ustride = gst_video_format_get_row_stride (format, 1, width);
  vstride = gst_video_format_get_row_stride (format, 2, width);

  for (y = 0; y < height2; y++) {
    guint8 *uptr, *vptr;
    guint8 u1, v1;

    uptr = udata + y * ustride;
    vptr = vdata + y * vstride;

    for (x = 0; x < width2; x++) {
      u1 = *uptr;
      v1 = *vptr;

      *uptr++ = tableu[u1][v1];
      *vptr++ = tablev[u1][v1];
    }
  }
}
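The chroma pass above indexes tableu and tablev with the (u, v) pair, because hue rotation mixes both chroma components. A hedged sketch of allocating a table of that shape (a hypothetical identity table, for illustration only):

#include <glib.h>

/* Hypothetical helper: allocate a 256x256 table of the shape
 * videobalance->tableu expects, here filled as the identity
 * (output u for every (u, v) input pair). */
static guint8 **
alloc_identity_u_table (void)
{
  guint8 **t = g_new (guint8 *, 256);
  gint u, v;

  for (u = 0; u < 256; u++) {
    t[u] = g_new (guint8, 256);
    for (v = 0; v < 256; v++)
      t[u][v] = u;              /* identity: ignore v */
  }
  return t;
}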
Code example #4
static void
gst_color_effects_transform_rgb (GstColorEffects * filter, guint8 * data)
{
  gint i, j;
  gint width, height;
  gint pixel_stride, row_stride, row_wrap;
  guint32 r, g, b;
  guint32 luma;
  gint offsets[3];

  /* videoformat fun copied from videobalance */

  offsets[0] = gst_video_format_get_component_offset (filter->format, 0,
      filter->width, filter->height);
  offsets[1] = gst_video_format_get_component_offset (filter->format, 1,
      filter->width, filter->height);
  offsets[2] = gst_video_format_get_component_offset (filter->format, 2,
      filter->width, filter->height);

  width =
      gst_video_format_get_component_width (filter->format, 0, filter->width);
  height =
      gst_video_format_get_component_height (filter->format, 0, filter->height);
  row_stride =
      gst_video_format_get_row_stride (filter->format, 0, filter->width);
  pixel_stride = gst_video_format_get_pixel_stride (filter->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  /* transform */

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];
      if (filter->map_luma) {
        /* BT. 709 coefficients in B8 fixed point */
        /* 0.2126 R + 0.7152 G + 0.0722 B */
        luma = ((r << 8) * 54) + ((g << 8) * 183) + ((b << 8) * 19);
        luma >>= 16;            /* get integer part */
        luma *= 3;              /* times 3 to retrieve the correct pixel from
                                 * the lut */
        /* map luma to lookup table */
        /* src.luma |-> table[luma].rgb */
        data[offsets[0]] = filter->table[luma];
        data[offsets[1]] = filter->table[luma + 1];
        data[offsets[2]] = filter->table[luma + 2];
      } else {
        /* map each color component to the correspondent lut color */
        /* src.r |-> table[r].r */
        /* src.g |-> table[g].g */
        /* src.b |-> table[b].b */
        data[offsets[0]] = filter->table[r * 3];
        data[offsets[1]] = filter->table[g * 3 + 1];
        data[offsets[2]] = filter->table[b * 3 + 2];
      }
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
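The luma computation above is 8.8 fixed point: the BT.709 weights 0.2126, 0.7152 and 0.0722 scale to 54, 183 and 19, which sum to 256, and ((r << 8) * 54) >> 16 reduces to (54 * r) >> 8. A hedged standalone equivalent:

#include <glib.h>

/* Integer BT.709 luma, equivalent to the fixed-point math above:
 * (54*R + 183*G + 19*B) / 256, with 54 + 183 + 19 == 256. */
static guint32
bt709_luma_8bit (guint32 r, guint32 g, guint32 b)
{
  return (54 * r + 183 * g + 19 * b) >> 8;
}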
Code example #5
/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
{
  gint width, height, stride;
  GstFlowReturn result;
  int i, plane;
  GstVideoFormat format;
  guint8 *dest, *src;

  switch (dec->info.pixel_fmt) {
    case TH_PF_444:
      format = GST_VIDEO_FORMAT_Y444;
      break;
    case TH_PF_420:
      format = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      format = GST_VIDEO_FORMAT_Y42B;
      break;
    default:
      g_assert_not_reached ();
  }

  result =
      gst_pad_alloc_buffer_and_set_caps (dec->srcpad, GST_BUFFER_OFFSET_NONE,
      gst_video_format_get_size (format, dec->width, dec->height),
      GST_PAD_CAPS (dec->srcpad), out);
  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  for (plane = 0; plane < 3; plane++) {
    width = gst_video_format_get_component_width (format, plane, dec->width);
    height = gst_video_format_get_component_height (format, plane, dec->height);
    stride = gst_video_format_get_row_stride (format, plane, dec->width);

    dest =
        GST_BUFFER_DATA (*out) + gst_video_format_get_component_offset (format,
        plane, dec->width, dec->height);
    src = buf[plane].data;
    src += ((height == dec->height) ? dec->offset_y : dec->offset_y / 2)
        * buf[plane].stride;
    src += (width == dec->width) ? dec->offset_x : dec->offset_x / 2;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += buf[plane].stride;
    }
  }

  return GST_FLOW_OK;
}
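The row-by-row memcpy is needed because the decoder's internal plane strides rarely match the row strides GStreamer expects in the output buffer. As a hedged generic form of that inner loop (hypothetical helper, for illustration):

#include <string.h>
#include <glib.h>

static void
copy_plane (guint8 * dest, gint dest_stride, const guint8 * src,
    gint src_stride, gint row_bytes, gint rows)
{
  gint i;

  for (i = 0; i < rows; i++) {
    memcpy (dest, src, row_bytes);      /* copy one row, skip the padding */
    dest += dest_stride;
    src += src_stride;
  }
}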
Code example #6
File: gstgamma.c  Project: luisbg/gst-plugins-good
static void
gst_gamma_packed_rgb_ip (GstGamma * gamma, guint8 * data)
{
  gint i, j, height;
  gint width, row_stride, row_wrap;
  gint pixel_stride;
  const guint8 *table = gamma->gamma_table;
  gint offsets[3];
  gint r, g, b;
  gint y, u, v;

  offsets[0] = gst_video_format_get_component_offset (gamma->format, 0,
      gamma->width, gamma->height);
  offsets[1] = gst_video_format_get_component_offset (gamma->format, 1,
      gamma->width, gamma->height);
  offsets[2] = gst_video_format_get_component_offset (gamma->format, 2,
      gamma->width, gamma->height);

  width = gst_video_format_get_component_width (gamma->format, 0, gamma->width);
  height = gst_video_format_get_component_height (gamma->format, 0,
      gamma->height);
  row_stride = gst_video_format_get_row_stride (gamma->format, 0, gamma->width);
  pixel_stride = gst_video_format_get_pixel_stride (gamma->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];

      y = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 0, r, g, b);
      u = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 1, r, g, b);
      v = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 2, r, g, b);

      y = table[CLAMP (y, 0, 255)];
      r = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 0, y, u, v);
      g = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 1, y, u, v);
      b = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 2, y, u, v);

      data[offsets[0]] = CLAMP (r, 0, 255);
      data[offsets[1]] = CLAMP (g, 0, 255);
      data[offsets[2]] = CLAMP (b, 0, 255);
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
Code example #7
File: gsttheoraenc.c  Project: ChinnaSuhas/ossbuild
static void
theora_enc_init_buffer (th_ycbcr_buffer buf, th_info * info, guint8 * data)
{
  GstVideoFormat format;
  guint i;

  switch (info->pixel_fmt) {
    case TH_PF_444:
      format = GST_VIDEO_FORMAT_Y444;
      break;
    case TH_PF_420:
      format = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      format = GST_VIDEO_FORMAT_Y42B;
      break;
    default:
      g_assert_not_reached ();
  }

  /* According to Theora developer Timothy Terriberry, the Theora 
   * encoder will not use memory outside of pic_width/height, even when
   * the frame size is bigger. The values outside this region will be encoded
   * to default values.
   * Due to this, setting the frame's width/height as the buffer width/height
   * is perfectly ok, even though it does not strictly look ok.
   */
  for (i = 0; i < 3; i++) {
    buf[i].width =
        gst_video_format_get_component_width (format, i, info->frame_width);
    buf[i].height =
        gst_video_format_get_component_height (format, i, info->frame_height);

    buf[i].data =
        data + gst_video_format_get_component_offset (format, i,
        info->pic_width, info->pic_height);
    buf[i].stride =
        gst_video_format_get_row_stride (format, i, info->pic_width);
  }
}
Code example #8
File: assrender.c  Project: adesurya/gst-mobile
static void
sink_handoff_cb_I420 (GstElement * object, GstBuffer * buffer, GstPad * pad,
    gpointer user_data)
{
  guint *sink_pos = (guint *) user_data;
  gboolean contains_text = (*sink_pos == 1 || *sink_pos == 2);
  guint c, i, j;
  guint8 *data = GST_BUFFER_DATA (buffer);
  gboolean all_red = TRUE;
  guint8 *comp;
  gint comp_stride, comp_width, comp_height;
  const guint8 color[] = { 81, 90, 240 };

  fail_unless_equals_int (GST_BUFFER_SIZE (buffer),
      gst_video_format_get_size (GST_VIDEO_FORMAT_I420, 640, 480));

  for (c = 0; c < 3; c++) {
    comp =
        data + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, c,
        640, 480);
    comp_stride =
        gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, c, 640);
    comp_width =
        gst_video_format_get_component_width (GST_VIDEO_FORMAT_I420, c, 640);
    comp_height =
        gst_video_format_get_component_height (GST_VIDEO_FORMAT_I420, c, 480);

    for (i = 0; i < comp_height; i++) {
      for (j = 0; j < comp_width; j++) {
        all_red = all_red && (comp[i * comp_stride + j] == color[c]);
      }
    }
  }

  fail_unless (contains_text != all_red,
      "Frame %d is incorrect (all red %d, contains text %d)", *sink_pos,
      all_red, contains_text);
  *sink_pos = *sink_pos + 1;
}
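The color array { 81, 90, 240 } is pure red expressed in 8-bit studio-range YCbCr (BT.601). A hedged check using the standard conversion constants (rgb_to_ycbcr_sdtv is a hypothetical helper, not part of the test):

#include <glib.h>
#include <math.h>

static void
rgb_to_ycbcr_sdtv (gint r, gint g, gint b, gint * y, gint * cb, gint * cr)
{
  *y = 16 + (gint) lround ((65.481 * r + 128.553 * g + 24.966 * b) / 255.0);
  *cb = 128 + (gint) lround ((-37.797 * r - 74.203 * g + 112.0 * b) / 255.0);
  *cr = 128 + (gint) lround ((112.0 * r - 93.786 * g - 18.214 * b) / 255.0);
}

/* rgb_to_ycbcr_sdtv (255, 0, 0, ...) yields (81, 90, 240), matching the
 * expected all-red background checked by the handoff callback above. */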
Code example #9
gboolean AVSC_CC
_avs_vcf_add_buffer (AVS_VideoCacheFilter *p, GstPad *pad, GstBuffer *inbuf, AVS_ScriptEnvironment *env)
{
  AVS_VideoFrame *buf_ptr;
  AVS_CacheableVideoFrame *cvf;
  AVSynthSink *sink;

  gboolean ret = TRUE;

  guint8 *in_data;
  guint in_size;
  GstClockTime in_timestamp, in_duration, in_running_time;
  gint64 in_offset;
  GstVideoFormat vf;
  gint in_stride0, in_stride1, in_stride2;
  gint offset0, offset1, offset2;
  gint rowsize0, rowsize1, rowsize2;
  gint height0, height1, height2;

  in_data = GST_BUFFER_DATA (inbuf);
  in_size = GST_BUFFER_SIZE (inbuf);
  in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  in_duration = GST_BUFFER_DURATION (inbuf);
  in_offset = GST_BUFFER_OFFSET (inbuf);

  sink = (AVSynthSink *) g_object_get_data (G_OBJECT (pad), "sinkstruct");

  GST_DEBUG ("Video cache %p: locking sinkmutex", (gpointer) p);
  g_mutex_lock (sink->sinkmutex);

  in_running_time = gst_segment_to_running_time (&sink->segment, GST_FORMAT_TIME, in_timestamp);

  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID(sink->first_ts) && GST_CLOCK_TIME_IS_VALID(in_timestamp)))
  {
    sink->first_ts = in_timestamp;
  }

  /* Offset voodoo magic */
  /* No offset on incoming frame */
  if (in_offset == -1)
  {
    /* Do we know offset of the previous frame? */
    if (sink->last_offset > -1)
    {
      in_offset = sink->last_offset + 1;
    }
    else
    {
      /* Try to convert timestamp to offset */
      if (in_timestamp >= 0 && in_running_time >= 0)
      {
        in_offset = gst_util_uint64_scale (in_running_time - sink->first_ts, p->parent.vi.fps_numerator,
            p->parent.vi.fps_denominator * GST_SECOND);
        /* Attempt to round to nearest integer: if the difference is more
         * than 0.5 (less than -0.5), it means that gst_util_uint64_scale()
         * just truncated an integer, while it had to be rounded
         */

        in_offset = in_offset * GST_SECOND - 
            in_running_time * p->parent.vi.fps_numerator / p->parent.vi.fps_denominator <= 
            -0.5 ? in_offset + 1: in_offset;
      }
      else
      {
        GST_ERROR ("Video cache %p: frame offset is unknown", (gpointer) p);
        ret = FALSE;
        goto end;
      }
    }
  }
  /* Offset sanity check */
  if (sink->last_offset > -1)
  {
    /* Non-monotonic offsets */
    if (in_offset < sink->last_offset || in_offset > sink->last_offset + 1)
    {
      GST_WARNING ("Video cache %p: last offset was %" G_GUINT64_FORMAT ", current offset is %" G_GUINT64_FORMAT " - shouldn't it be %" G_GUINT64_FORMAT "?", p, sink->last_offset, in_offset, sink->last_offset + 1);
      in_offset = sink->last_offset + 1;
    }
    else if (in_offset == sink->last_offset)
    {
      GST_WARNING ("Video cache %p: duplicate offsets %" G_GUINT64_FORMAT ", dropping", (gpointer) p, in_offset);
      goto end;
    }
  }

  sink->last_offset = in_offset;

  if (p->size >= p->used_size && !sink->flush)
  {
    GST_DEBUG ("Video cache %p: blocking at frame %" G_GUINT64_FORMAT, (gpointer) p, in_offset);
    while (p->size >= p->used_size && !sink->flush && !sink->seeking)
    {
      GST_DEBUG ("Video cache %p: sleeping while waiting at frame %" G_GUINT64_FORMAT
        ", cache range%" G_GUINT64_FORMAT "+ %" G_GUINT64_FORMAT
        ", size=%" G_GUINT64_FORMAT" , stream lock = %p", (gpointer) p, in_offset, p->rng_from, p->used_size,
        p->size, GST_PAD_GET_STREAM_LOCK (pad));
      g_cond_wait (p->vcache_block_cond, sink->sinkmutex);
      GST_DEBUG ("Video cache %p: woke up while waiting at frame %" G_GUINT64_FORMAT
        ", cache range%" G_GUINT64_FORMAT "+ %" G_GUINT64_FORMAT
        ", size=%" G_GUINT64_FORMAT" , stream lock = %p", (gpointer) p, in_offset, p->rng_from, p->used_size,
        p->size, GST_PAD_GET_STREAM_LOCK (pad));
    }
  }

  /* We've been seeking backwards and the seek wasn't very precise, so
   * we're getting frames previous to the frame we need.
   * Or we're in seek mode and the frame is not the frame we're seeking to.
   * If we've pushed a seek event and it moved the source to a frame after
   * rng_from (i.e. the seek missed), this will turn into infinite loop.
   */
  if (G_UNLIKELY (sink->flush || (in_offset < p->rng_from && !sink->seeking) || (sink->seeking && in_offset != p->rng_from)))
  {
    if (sink->flush)
      GST_DEBUG ("Video cache %p: skipping frame %" G_GUINT64_FORMAT " - flushing", (gpointer) p, in_offset);
    else if (in_offset < p->rng_from && !sink->seeking)
      GST_DEBUG ("Video cache %p: skipping frame %" G_GUINT64_FORMAT " < %" G_GUINT64_FORMAT, (gpointer) p, in_offset, p->rng_from);
    else if (sink->seeking && in_offset != p->rng_from)
      GST_DEBUG ("Video cache %p: skipping frame %" G_GUINT64_FORMAT " - seeking to %" G_GUINT64_FORMAT, (gpointer) p, in_offset, p->rng_from);   
    goto end;
  }
  sink->seeking = FALSE;

  gst_avsynth_buf_pad_caps_to_vi (inbuf, pad, GST_BUFFER_CAPS (inbuf), &p->parent.vi);

  gst_video_format_parse_caps (GST_BUFFER_CAPS (inbuf), &vf, NULL, NULL);

  /* Allocate a new frame, with default alignment */
  buf_ptr = _avs_se_vf_new_a (env, &p->vi, AVS_FRAME_ALIGN);

  offset0 = gst_video_format_get_component_offset (vf, 0, p->parent.vi.width, p->parent.vi.height);
  offset1 = gst_video_format_get_component_offset (vf, 1, p->parent.vi.width, p->parent.vi.height);
  offset2 = gst_video_format_get_component_offset (vf, 2, p->parent.vi.width, p->parent.vi.height);

  /* The Spherical Horse in Vacuum: row stride is not guaranteed to match the
   * value returned by this function.
   */
  in_stride0 = gst_video_format_get_row_stride (vf, 0, p->parent.vi.width);
  in_stride1 = gst_video_format_get_row_stride (vf, 1, p->parent.vi.width);
  in_stride2 = gst_video_format_get_row_stride (vf, 2, p->parent.vi.width);

  rowsize0 = gst_video_format_get_component_width (vf, 0, p->parent.vi.width) * gst_video_format_get_pixel_stride (vf, 0);
  rowsize1 = gst_video_format_get_component_width (vf, 1, p->parent.vi.width) * gst_video_format_get_pixel_stride (vf, 1);
  rowsize2 = gst_video_format_get_component_width (vf, 2, p->parent.vi.width) * gst_video_format_get_pixel_stride (vf, 2);

  height0 = gst_video_format_get_component_height (vf, 0, p->parent.vi.height);
  height1 = gst_video_format_get_component_height (vf, 1, p->parent.vi.height);
  height2 = gst_video_format_get_component_height (vf, 2, p->parent.vi.height);

  if (!AVS_IS_PLANAR (&p->parent.vi))
  {
    offset2 = offset1 = offset0;
    in_stride2 = in_stride1 = 0;
    rowsize2 = rowsize1 = 0;
    height2 = height1 = 0;
  }

  _avs_se_bit_blt (env, _avs_vf_get_write_ptr (buf_ptr), _avs_vf_get_pitch (buf_ptr), in_data + offset0, in_stride0, rowsize0, height0);
  // Blit More planes (pitch, rowsize and height should be 0, if none is present)
  _avs_se_bit_blt (env, _avs_vf_get_write_ptr_p (buf_ptr, PLANAR_U), _avs_vf_get_pitch_p (buf_ptr, PLANAR_U), in_data + offset1, in_stride1, rowsize1, height1);
  _avs_se_bit_blt (env, _avs_vf_get_write_ptr_p (buf_ptr, PLANAR_V), _avs_vf_get_pitch_p (buf_ptr, PLANAR_V), in_data + offset2, in_stride2, rowsize2, height2);

  cvf = g_new0 (AVS_CacheableVideoFrame, 1);
  cvf->vf = buf_ptr;
  cvf->touched = FALSE;
  cvf->selfindex = in_offset;
  cvf->countindex = p->framecounter++;
  _avs_vf_set_timestamp (buf_ptr, in_timestamp);
  _avs_vf_set_parity (buf_ptr, p->parent.vi.image_type);

  /* Buffer is full, meaning that a filter is not processing frames
   * fast enough.
   */
  if (G_UNLIKELY (p->used_size <= p->size))
  {
    if (p->size > p->used_size)
      g_critical ("Video cache %p: buffer overflow - %" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT, (gpointer) p, p->used_size, p->size);
    GST_DEBUG ("Video cache %p: cache is full", (gpointer) p);
    /* Cache is relatively small, we can expand it */
    if (G_UNLIKELY (p->touched_last_time * 3 > p->used_size))
    {
      GST_DEBUG ("Video cache %p: cache is relatively small (%" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT "), expanding...", (gpointer) p, p->touched_last_time * 3, p->used_size);
      _avs_vcf_resize (p, p->used_size + 1);
    }
    else
      g_critical ("Video cache %p: cache is overflowing!", (gpointer) p);
  }

  /* It is guaranteed that at this moment we have at least one free unused
   * array element left. At least it should be guaranteed...
   */
  GST_DEBUG ("Video cache %p: cache size = %" G_GUINT64_FORMAT ", adding a buffer %p (%p), offset = %" G_GUINT64_FORMAT, (gpointer) p, p->size, cvf, buf_ptr, in_offset);
  g_ptr_array_index (p->bufs, p->size++) = (gpointer) cvf;

  /* We don't really know the number of frame the other thread is waiting for
   * (or even if it waits at all), so we'll send a signal each time we add
   * a buffer.
   * People told me that calling g_cond_signal once for each frame (60 times
   * a second, unless you're transcoding a video) doesn't make a difference.
   * And transcoding itself is MUCH slower.
   */
  GST_DEBUG ("Video cache %p: signaling newframe", (gpointer) p);
  g_cond_signal (p->vcache_cond);

end:
  g_mutex_unlock (sink->sinkmutex);
 
  GST_DEBUG ("Video cache %p: unlocked sinkmutex", (gpointer) p);

  return ret;
}
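The offset voodoo above hand-rolls nearest-integer rounding on top of gst_util_uint64_scale(), which truncates. A hedged, more direct sketch of the same frame-index computation (gst_util_uint64_scale_round is available in GStreamer core since 0.10.26):

#include <gst/gst.h>

/* Nearest frame index for a running time, given an fps fraction. */
static guint64
running_time_to_frame (GstClockTime running_time, gint fps_n, gint fps_d)
{
  return gst_util_uint64_scale_round (running_time, fps_n,
      (guint64) fps_d * GST_SECOND);
}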
Code example #10
static GstFlowReturn
gst_jasper_dec_negotiate (GstJasperDec * dec, jas_image_t * image)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gint width, height, channels;
  gint i, j;
  gboolean negotiate = FALSE;
  jas_clrspc_t clrspc;
  GstCaps *allowed_caps, *caps;

  width = jas_image_width (image);
  height = jas_image_height (image);
  channels = jas_image_numcmpts (image);

  GST_LOG_OBJECT (dec, "%d x %d, %d components", width, height, channels);

  /* jp2c bitstream has no real colour space info (kept in container),
   * so decoder may only pretend to know, where it really does not */
  if (!jas_clrspc_isunknown (dec->clrspc)) {
    clrspc = dec->clrspc;
    GST_DEBUG_OBJECT (dec, "forcing container supplied colour space %d",
        clrspc);
    jas_image_setclrspc (image, clrspc);
  } else
    clrspc = jas_image_clrspc (image);

  if (!width || !height || !channels || jas_clrspc_isunknown (clrspc))
    goto fail_image;

  if (dec->width != width || dec->height != height ||
      dec->channels != channels || dec->clrspc != clrspc)
    negotiate = TRUE;

  if (channels != 3)
    goto not_supported;

  for (i = 0; i < channels; i++) {
    gint cheight, cwidth, depth, sgnd;

    cheight = jas_image_cmptheight (image, i);
    cwidth = jas_image_cmptwidth (image, i);
    depth = jas_image_cmptprec (image, i);
    sgnd = jas_image_cmptsgnd (image, i);

    GST_LOG_OBJECT (dec, "image component %d, %dx%d, depth %d, sgnd %d", i,
        cwidth, cheight, depth, sgnd);

    if (depth != 8 || sgnd)
      goto not_supported;

    if (dec->cheight[i] != cheight || dec->cwidth[i] != cwidth) {
      dec->cheight[i] = cheight;
      dec->cwidth[i] = cwidth;
      negotiate = TRUE;
    }
  }

  if (!negotiate && dec->format != GST_VIDEO_FORMAT_UNKNOWN)
    goto done;

  /* clear and refresh to new state */
  flow_ret = GST_FLOW_NOT_NEGOTIATED;
  dec->format = GST_VIDEO_FORMAT_UNKNOWN;
  dec->width = width;
  dec->height = height;
  dec->channels = channels;

  /* retrieve allowed caps, and find the first one that reasonably maps
   * to the parameters of the colourspace */
  caps = gst_pad_get_allowed_caps (dec->srcpad);
  if (!caps) {
    GST_DEBUG_OBJECT (dec, "... but no peer, using template caps");
    /* need to copy because get_allowed_caps returns a ref,
       and get_pad_template_caps doesn't */
    caps = gst_caps_copy (gst_pad_get_pad_template_caps (dec->srcpad));
  }
  /* avoid lists of fourcc, etc */
  allowed_caps = gst_caps_normalize (caps);
  caps = NULL;
  GST_LOG_OBJECT (dec, "allowed source caps %" GST_PTR_FORMAT, allowed_caps);

  for (i = 0; i < gst_caps_get_size (allowed_caps); i++) {
    GstVideoFormat format;
    gboolean ok;

    if (caps)
      gst_caps_unref (caps);
    caps = gst_caps_copy_nth (allowed_caps, i);
    /* sigh, ds and _parse_caps need fixed caps for parsing, fixate */
    gst_pad_fixate_caps (dec->srcpad, caps);
    GST_LOG_OBJECT (dec, "checking caps %" GST_PTR_FORMAT, caps);
    if (!gst_video_format_parse_caps (caps, &format, NULL, NULL))
      continue;
    if (gst_video_format_is_rgb (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_RGB) {
      GST_DEBUG_OBJECT (dec, "trying RGB");
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_R))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_G))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_B))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing RGB color component");
        continue;
      }
    } else if (gst_video_format_is_yuv (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_YCBCR) {
      GST_DEBUG_OBJECT (dec, "trying YUV");
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_Y))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CB))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CR))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing YUV color component");
        continue;
      }
    } else
      continue;
    /* match format with validity checks */
    ok = TRUE;
    for (j = 0; j < channels; j++) {
      gint cmpt;

      cmpt = dec->cmpt[j];
      if (dec->cwidth[cmpt] != gst_video_format_get_component_width (format, j,
              width) ||
          dec->cheight[cmpt] != gst_video_format_get_component_height (format,
              j, height))
        ok = FALSE;
    }
    /* commit to this format */
    if (ok) {
      dec->format = format;
      break;
    }
  }

  if (caps)
    gst_caps_unref (caps);
  gst_caps_unref (allowed_caps);

  if (dec->format != GST_VIDEO_FORMAT_UNKNOWN) {
    /* cache some video format properties */
    for (j = 0; j < channels; ++j) {
      dec->offset[j] = gst_video_format_get_component_offset (dec->format, j,
          dec->width, dec->height);
      dec->inc[j] = gst_video_format_get_pixel_stride (dec->format, j);
      dec->stride[j] = gst_video_format_get_row_stride (dec->format, j,
          dec->width);
    }
    dec->image_size = gst_video_format_get_size (dec->format, width, height);
    dec->alpha = gst_video_format_has_alpha (dec->format);

    if (dec->buf)
      g_free (dec->buf);
    dec->buf = g_new0 (glong, dec->width);

    caps = gst_video_format_new_caps (dec->format, dec->width, dec->height,
        dec->framerate_numerator, dec->framerate_denominator, 1, 1);

    GST_DEBUG_OBJECT (dec, "Set format to %d, size to %dx%d", dec->format,
        dec->width, dec->height);

    if (!gst_pad_set_caps (dec->srcpad, caps))
      flow_ret = GST_FLOW_NOT_NEGOTIATED;
    else
      flow_ret = GST_FLOW_OK;

    gst_caps_unref (caps);
  }

done:
  return flow_ret;

  /* ERRORS */
fail_image:
  {
    GST_DEBUG_OBJECT (dec, "Failed to process decoded image.");
    flow_ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
not_supported:
  {
    GST_DEBUG_OBJECT (dec, "Decoded image has unsupported colour space.");
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("Unsupported colorspace"));
    flow_ret = GST_FLOW_ERROR;
    goto done;
  }
}
Code example #11
static void
gst_video_balance_packed_rgb (GstVideoBalance * videobalance, guint8 * data)
{
  gint i, j, height;
  gint width, row_stride, row_wrap;
  gint pixel_stride;
  gint offsets[3];
  gint r, g, b;
  gint y, u, v;
  gint u_tmp, v_tmp;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  offsets[0] = gst_video_format_get_component_offset (videobalance->format, 0,
      videobalance->width, videobalance->height);
  offsets[1] = gst_video_format_get_component_offset (videobalance->format, 1,
      videobalance->width, videobalance->height);
  offsets[2] = gst_video_format_get_component_offset (videobalance->format, 2,
      videobalance->width, videobalance->height);

  width =
      gst_video_format_get_component_width (videobalance->format, 0,
      videobalance->width);
  height =
      gst_video_format_get_component_height (videobalance->format, 0,
      videobalance->height);
  row_stride =
      gst_video_format_get_row_stride (videobalance->format, 0,
      videobalance->width);
  pixel_stride = gst_video_format_get_pixel_stride (videobalance->format, 0);
  row_wrap = row_stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];

      y = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 0, r, g, b);
      u_tmp = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 1, r, g, b);
      v_tmp = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 2, r, g, b);

      y = CLAMP (y, 0, 255);
      u_tmp = CLAMP (u_tmp, 0, 255);
      v_tmp = CLAMP (v_tmp, 0, 255);

      y = tabley[y];
      u = tableu[u_tmp][v_tmp];
      v = tablev[u_tmp][v_tmp];

      r = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 0, y, u, v);
      g = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 1, y, u, v);
      b = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 2, y, u, v);

      data[offsets[0]] = CLAMP (r, 0, 255);
      data[offsets[1]] = CLAMP (g, 0, 255);
      data[offsets[2]] = CLAMP (b, 0, 255);
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
Code example #12
static gboolean
gst_jpegenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstJpegEnc *enc = GST_JPEGENC (gst_pad_get_parent (pad));
  GstVideoFormat format;
  gint width, height;
  gint fps_num, fps_den;
  gint par_num, par_den;
  gint i;
  GstCaps *othercaps;
  gboolean ret;

  /* get info from caps */
  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto refuse_caps;
  /* optional; pass along if present */
  fps_num = fps_den = -1;
  par_num = par_den = -1;
  gst_video_parse_caps_framerate (caps, &fps_num, &fps_den);
  gst_video_parse_caps_pixel_aspect_ratio (caps, &par_num, &par_den);

  if (width == enc->width && height == enc->height && enc->format == format
      && fps_num == enc->fps_num && fps_den == enc->fps_den
      && par_num == enc->par_num && par_den == enc->par_den)
    return TRUE;

  /* store input description */
  enc->format = format;
  enc->width = width;
  enc->height = height;
  enc->fps_num = fps_num;
  enc->fps_den = fps_den;
  enc->par_num = par_num;
  enc->par_den = par_den;

  /* prepare a cached image description  */
  enc->channels = 3 + (gst_video_format_has_alpha (format) ? 1 : 0);
  /* ... but any alpha is disregarded in encoding */
  if (gst_video_format_is_gray (format))
    enc->channels = 1;
  else
    enc->channels = 3;
  enc->h_max_samp = 0;
  enc->v_max_samp = 0;
  for (i = 0; i < enc->channels; ++i) {
    enc->cwidth[i] = gst_video_format_get_component_width (format, i, width);
    enc->cheight[i] = gst_video_format_get_component_height (format, i, height);
    enc->offset[i] = gst_video_format_get_component_offset (format, i, width,
        height);
    enc->stride[i] = gst_video_format_get_row_stride (format, i, width);
    enc->inc[i] = gst_video_format_get_pixel_stride (format, i);
    enc->h_samp[i] = GST_ROUND_UP_4 (width) / enc->cwidth[i];
    enc->h_max_samp = MAX (enc->h_max_samp, enc->h_samp[i]);
    enc->v_samp[i] = GST_ROUND_UP_4 (height) / enc->cheight[i];
    enc->v_max_samp = MAX (enc->v_max_samp, enc->v_samp[i]);
  }
  /* samp should only be 1, 2 or 4 */
  g_assert (enc->h_max_samp <= 4);
  g_assert (enc->v_max_samp <= 4);
  /* now invert */
  /* maximum is invariant, as one of the components should have samp 1 */
  for (i = 0; i < enc->channels; ++i) {
    enc->h_samp[i] = enc->h_max_samp / enc->h_samp[i];
    enc->v_samp[i] = enc->v_max_samp / enc->v_samp[i];
  }
  enc->planar = (enc->inc[0] == 1 && enc->inc[1] == 1 && enc->inc[2] == 1);

  othercaps = gst_caps_copy (gst_pad_get_pad_template_caps (enc->srcpad));
  gst_caps_set_simple (othercaps,
      "width", G_TYPE_INT, enc->width, "height", G_TYPE_INT, enc->height, NULL);
  if (enc->fps_den > 0)
    gst_caps_set_simple (othercaps,
        "framerate", GST_TYPE_FRACTION, enc->fps_num, enc->fps_den, NULL);
  if (enc->par_den > 0)
    gst_caps_set_simple (othercaps,
        "pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_num, enc->par_den,
        NULL);

  ret = gst_pad_set_caps (enc->srcpad, othercaps);
  gst_caps_unref (othercaps);

  if (ret)
    gst_jpegenc_resync (enc);

  gst_object_unref (enc);

  return ret;

  /* ERRORS */
refuse_caps:
  {
    GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
    gst_object_unref (enc);
    return FALSE;
  }
}
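As a worked example of the sampling-factor arithmetic above: for GST_VIDEO_FORMAT_I420 at 640x480 the component widths are { 640, 320, 320 }, so h_samp starts as { 1, 2, 2 } (v_samp likewise), both maxima are 2, and the inversion step yields the JPEG sampling factors { 2, 1, 1 }: luma is sampled twice as densely as each chroma plane.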
Code example #13
File: gsteglglessink.c  Project: pli3/gst-plugins-bad
static gboolean
gst_eglglessink_fill_texture (GstEglGlesSink * eglglessink, GstBuffer * buf)
{
  gint w, h;

  w = GST_VIDEO_SINK_WIDTH (eglglessink);
  h = GST_VIDEO_SINK_HEIGHT (eglglessink);

  GST_DEBUG_OBJECT (eglglessink,
      "Got good buffer %p. Sink geometry is %dx%d size %d", buf, w, h,
      buf ? GST_BUFFER_SIZE (buf) : -1);

  switch (eglglessink->format) {
    case GST_VIDEO_FORMAT_RGBA:
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_ABGR:
    case GST_VIDEO_FORMAT_RGBx:
    case GST_VIDEO_FORMAT_BGRx:
    case GST_VIDEO_FORMAT_xRGB:
    case GST_VIDEO_FORMAT_xBGR:
      glActiveTexture (GL_TEXTURE0);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[0]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA,
          GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf));
      break;
    case GST_VIDEO_FORMAT_AYUV:
      glActiveTexture (GL_TEXTURE0);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[0]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA,
          GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf));
      break;
    case GST_VIDEO_FORMAT_Y444:
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_YV12:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_Y41B:{
      gint coffset, cw, ch;

      coffset =
          gst_video_format_get_component_offset (eglglessink->format, 0, w, h);
      cw = gst_video_format_get_component_width (eglglessink->format, 0, w);
      ch = gst_video_format_get_component_height (eglglessink->format, 0, h);
      glActiveTexture (GL_TEXTURE0);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[0]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE, cw, ch, 0,
          GL_LUMINANCE, GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf) + coffset);
      coffset =
          gst_video_format_get_component_offset (eglglessink->format, 1, w, h);
      cw = gst_video_format_get_component_width (eglglessink->format, 1, w);
      ch = gst_video_format_get_component_height (eglglessink->format, 1, h);
      glActiveTexture (GL_TEXTURE1);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[1]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE, cw, ch, 0,
          GL_LUMINANCE, GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf) + coffset);
      coffset =
          gst_video_format_get_component_offset (eglglessink->format, 2, w, h);
      cw = gst_video_format_get_component_width (eglglessink->format, 2, w);
      ch = gst_video_format_get_component_height (eglglessink->format, 2, h);
      glActiveTexture (GL_TEXTURE2);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[2]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE, cw, ch, 0,
          GL_LUMINANCE, GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf) + coffset);
      break;
    }
    case GST_VIDEO_FORMAT_YUY2:
    case GST_VIDEO_FORMAT_YVYU:
    case GST_VIDEO_FORMAT_UYVY:
      glActiveTexture (GL_TEXTURE0);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[0]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, w, h, 0,
          GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf));
      glActiveTexture (GL_TEXTURE1);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[1]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA, GST_ROUND_UP_2 (w) / 2,
          h, 0, GL_RGBA, GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf));
      break;
    case GST_VIDEO_FORMAT_NV12:
    case GST_VIDEO_FORMAT_NV21:{
      gint coffset, cw, ch;

      coffset =
          gst_video_format_get_component_offset (eglglessink->format, 0, w, h);
      cw = gst_video_format_get_component_width (eglglessink->format, 0, w);
      ch = gst_video_format_get_component_height (eglglessink->format, 0, h);
      glActiveTexture (GL_TEXTURE0);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[0]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE, cw, ch, 0,
          GL_LUMINANCE, GL_UNSIGNED_BYTE, GST_BUFFER_DATA (buf) + coffset);

      coffset =
          gst_video_format_get_component_offset (eglglessink->format,
          (eglglessink->format == GST_VIDEO_FORMAT_NV12 ? 1 : 2), w, h);
      cw = gst_video_format_get_component_width (eglglessink->format, 1, w);
      ch = gst_video_format_get_component_height (eglglessink->format, 1, h);
      glActiveTexture (GL_TEXTURE1);
      glBindTexture (GL_TEXTURE_2D, eglglessink->egl_context->texture[1]);
      glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, cw, ch, 0,
          GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE,
          GST_BUFFER_DATA (buf) + coffset);
      break;
    }
    default:
      g_assert_not_reached ();
      break;
  }

  if (got_gl_error ("glTexImage2D"))
    goto HANDLE_ERROR;

  return TRUE;

HANDLE_ERROR:
  return FALSE;
}
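got_gl_error() is a helper defined elsewhere in gsteglglessink.c; a hedged sketch of what such a check typically looks like (the real implementation may differ):

#include <GLES2/gl2.h>
#include <glib.h>

static gboolean
got_gl_error (const char *call)
{
  GLenum error = glGetError ();

  if (error != GL_NO_ERROR) {
    g_printerr ("GL error 0x%04x after %s\n", error, call);
    return TRUE;
  }
  return FALSE;
}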
Code example #14
File: gstcompare.c  Project: kanongil/gst-plugins-bad
static gdouble
gst_compare_ssim (GstCompare * comp, GstBuffer * buf1, GstBuffer * buf2)
{
  GstCaps *caps;
  GstVideoFormat format, f;
  gint width, height, w, h, i, comps;
  gdouble cssim[4] = { 0.0, }, ssim, c[4] = { 1.0, 0.0, 0.0, 0.0 };
  guint8 *data1, *data2;

  caps = GST_BUFFER_CAPS (buf1);
  if (!caps)
    goto invalid_input;

  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto invalid_input;

  caps = GST_BUFFER_CAPS (buf2);
  if (!caps)
    goto invalid_input;

  if (!gst_video_format_parse_caps (caps, &f, &w, &h))
    goto invalid_input;

  if (f != format || w != width || h != height)
    return comp->threshold + 1;

  comps = gst_video_format_is_gray (format) ? 1 : 3;
  if (gst_video_format_has_alpha (format))
    comps += 1;

  /* note that some are reported both yuv and gray */
  for (i = 0; i < comps; ++i)
    c[i] = 1.0;
  /* increase luma weight if yuv */
  if (gst_video_format_is_yuv (format) && (comps > 1))
    c[0] = comps - 1;
  for (i = 0; i < comps; ++i)
    c[i] /= (gst_video_format_is_yuv (format) && (comps > 1)) ?
        2 * (comps - 1) : comps;

  data1 = GST_BUFFER_DATA (buf1);
  data2 = GST_BUFFER_DATA (buf2);
  for (i = 0; i < comps; i++) {
    gint offset, cw, ch, step, stride;

    /* only support most common formats */
    if (gst_video_format_get_component_depth (format, i) != 8)
      goto unsupported_input;
    offset = gst_video_format_get_component_offset (format, i, width, height);
    cw = gst_video_format_get_component_width (format, i, width);
    ch = gst_video_format_get_component_height (format, i, height);
    step = gst_video_format_get_pixel_stride (format, i);
    stride = gst_video_format_get_row_stride (format, i, width);

    GST_LOG_OBJECT (comp, "component %d", i);
    cssim[i] = gst_compare_ssim_component (comp, data1 + offset, data2 + offset,
        cw, ch, step, stride);
    GST_LOG_OBJECT (comp, "ssim[%d] = %f", i, cssim[i]);
  }

#ifndef GST_DISABLE_GST_DEBUG
  for (i = 0; i < 4; i++) {
    GST_DEBUG_OBJECT (comp, "ssim[%d] = %f, c[%d] = %f", i, cssim[i], i, c[i]);
  }
#endif

  ssim = cssim[0] * c[0] + cssim[1] * c[1] + cssim[2] * c[2] + cssim[3] * c[3];

  return ssim;

  /* ERRORS */
invalid_input:
  {
    GST_ERROR_OBJECT (comp, "ssim method needs raw video input");
    return 0;
  }
unsupported_input:
  {
    GST_ERROR_OBJECT (comp, "raw video format not supported %" GST_PTR_FORMAT,
        caps);
    return 0;
  }
}
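As a worked example of the weighting above: for I420 (YUV, no alpha) comps is 3, so c starts as { 2, 1, 1 }; each entry is then divided by 2 * (3 - 1) = 4, giving final weights { 0.5, 0.25, 0.25 }, i.e. luma counts as much as both chroma planes combined.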
Code example #15
static void
gst_deinterlace_method_setup_impl (GstDeinterlaceMethod * self,
    GstVideoFormat format, gint width, gint height)
{
  gint i;
  GstDeinterlaceMethodClass *klass = GST_DEINTERLACE_METHOD_GET_CLASS (self);

  self->format = format;
  self->frame_width = width;
  self->frame_height = height;

  self->deinterlace_frame = NULL;

  if (format == GST_VIDEO_FORMAT_UNKNOWN)
    return;

  for (i = 0; i < 4; i++) {
    self->width[i] = gst_video_format_get_component_width (format, i, width);
    self->height[i] = gst_video_format_get_component_height (format, i, height);
    self->offset[i] =
        gst_video_format_get_component_offset (format, i, width, height);
    self->row_stride[i] = gst_video_format_get_row_stride (format, i, width);
    self->pixel_stride[i] = gst_video_format_get_pixel_stride (format, i);
  }

  switch (format) {
    case GST_VIDEO_FORMAT_YUY2:
      self->deinterlace_frame = klass->deinterlace_frame_yuy2;
      break;
    case GST_VIDEO_FORMAT_YVYU:
      self->deinterlace_frame = klass->deinterlace_frame_yvyu;
      break;
    case GST_VIDEO_FORMAT_UYVY:
      self->deinterlace_frame = klass->deinterlace_frame_uyvy;
      break;
    case GST_VIDEO_FORMAT_I420:
      self->deinterlace_frame = klass->deinterlace_frame_i420;
      break;
    case GST_VIDEO_FORMAT_YV12:
      self->deinterlace_frame = klass->deinterlace_frame_yv12;
      break;
    case GST_VIDEO_FORMAT_Y444:
      self->deinterlace_frame = klass->deinterlace_frame_y444;
      break;
    case GST_VIDEO_FORMAT_Y42B:
      self->deinterlace_frame = klass->deinterlace_frame_y42b;
      break;
    case GST_VIDEO_FORMAT_Y41B:
      self->deinterlace_frame = klass->deinterlace_frame_y41b;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      self->deinterlace_frame = klass->deinterlace_frame_ayuv;
      break;
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_xRGB:
      self->deinterlace_frame = klass->deinterlace_frame_argb;
      break;
    case GST_VIDEO_FORMAT_ABGR:
    case GST_VIDEO_FORMAT_xBGR:
      self->deinterlace_frame = klass->deinterlace_frame_abgr;
      break;
    case GST_VIDEO_FORMAT_RGBA:
    case GST_VIDEO_FORMAT_RGBx:
      self->deinterlace_frame = klass->deinterlace_frame_rgba;
      break;
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_BGRx:
      self->deinterlace_frame = klass->deinterlace_frame_bgra;
      break;
    case GST_VIDEO_FORMAT_RGB:
      self->deinterlace_frame = klass->deinterlace_frame_rgb;
      break;
    case GST_VIDEO_FORMAT_BGR:
      self->deinterlace_frame = klass->deinterlace_frame_bgr;
      break;
    default:
      self->deinterlace_frame = NULL;
      break;
  }
}
Code example #16
static gboolean
gst_jasper_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
    GstJasperEnc *enc;
    GstVideoFormat format;
    gint width, height;
    gint fps_num, fps_den;
    gint par_num, par_den;
    gint i;

    enc = GST_JASPER_ENC (GST_PAD_PARENT (pad));

    /* get info from caps */
    if (!gst_video_format_parse_caps (caps, &format, &width, &height))
        goto refuse_caps;
    /* optional; pass along if present */
    fps_num = fps_den = -1;
    par_num = par_den = -1;
    gst_video_parse_caps_framerate (caps, &fps_num, &fps_den);
    gst_video_parse_caps_pixel_aspect_ratio (caps, &par_num, &par_den);

    if (width == enc->width && height == enc->height && enc->format == format
            && fps_num == enc->fps_num && fps_den == enc->fps_den
            && par_num == enc->par_num && par_den == enc->par_den)
        return TRUE;

    /* store input description */
    enc->format = format;
    enc->width = width;
    enc->height = height;
    enc->fps_num = fps_num;
    enc->fps_den = fps_den;
    enc->par_num = par_num;
    enc->par_den = par_den;

    /* prepare a cached image description  */
    enc->channels = 3 + (gst_video_format_has_alpha (format) ? 1 : 0);
    for (i = 0; i < enc->channels; ++i) {
        enc->cwidth[i] = gst_video_format_get_component_width (format, i, width);
        enc->cheight[i] = gst_video_format_get_component_height (format, i, height);
        enc->offset[i] = gst_video_format_get_component_offset (format, i, width,
                         height);
        enc->stride[i] = gst_video_format_get_row_stride (format, i, width);
        enc->inc[i] = gst_video_format_get_pixel_stride (format, i);
    }

    if (!gst_jasper_enc_set_src_caps (enc))
        goto setcaps_failed;
    if (!gst_jasper_enc_init_encoder (enc))
        goto setup_failed;

    return TRUE;

    /* ERRORS */
setup_failed:
    {
        GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
        return FALSE;
    }
setcaps_failed:
    {
        GST_WARNING_OBJECT (enc, "Setting src caps failed");
        GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
        return FALSE;
    }
refuse_caps:
    {
        GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
        gst_object_unref (enc);
        return FALSE;
    }
}
Code example #17
static void
gst_video_flip_planar_yuv (GstVideoFlip * videoflip, guint8 * dest,
    const guint8 * src)
{
  gint x, y;
  guint8 const *s;
  guint8 *d;
  GstVideoFormat format = videoflip->format;
  gint sw = videoflip->from_width;
  gint sh = videoflip->from_height;
  gint dw = videoflip->to_width;
  gint dh = videoflip->to_height;
  gint src_y_stride, src_u_stride, src_v_stride;
  gint src_y_offset, src_u_offset, src_v_offset;
  gint src_y_height, src_u_height, src_v_height;
  gint src_y_width, src_u_width, src_v_width;
  gint dest_y_stride, dest_u_stride, dest_v_stride;
  gint dest_y_offset, dest_u_offset, dest_v_offset;
  gint dest_y_height, dest_u_height, dest_v_height;
  gint dest_y_width, dest_u_width, dest_v_width;

  src_y_stride = gst_video_format_get_row_stride (format, 0, sw);
  src_u_stride = gst_video_format_get_row_stride (format, 1, sw);
  src_v_stride = gst_video_format_get_row_stride (format, 2, sw);

  dest_y_stride = gst_video_format_get_row_stride (format, 0, dw);
  dest_u_stride = gst_video_format_get_row_stride (format, 1, dw);
  dest_v_stride = gst_video_format_get_row_stride (format, 2, dw);

  src_y_offset = gst_video_format_get_component_offset (format, 0, sw, sh);
  src_u_offset = gst_video_format_get_component_offset (format, 1, sw, sh);
  src_v_offset = gst_video_format_get_component_offset (format, 2, sw, sh);

  dest_y_offset = gst_video_format_get_component_offset (format, 0, dw, dh);
  dest_u_offset = gst_video_format_get_component_offset (format, 1, dw, dh);
  dest_v_offset = gst_video_format_get_component_offset (format, 2, dw, dh);

  src_y_width = gst_video_format_get_component_width (format, 0, sw);
  src_u_width = gst_video_format_get_component_width (format, 1, sw);
  src_v_width = gst_video_format_get_component_width (format, 2, sw);

  dest_y_width = gst_video_format_get_component_width (format, 0, dw);
  dest_u_width = gst_video_format_get_component_width (format, 1, dw);
  dest_v_width = gst_video_format_get_component_width (format, 2, dw);

  src_y_height = gst_video_format_get_component_height (format, 0, sh);
  src_u_height = gst_video_format_get_component_height (format, 1, sh);
  src_v_height = gst_video_format_get_component_height (format, 2, sh);

  dest_y_height = gst_video_format_get_component_height (format, 0, dh);
  dest_u_height = gst_video_format_get_component_height (format, 1, dh);
  dest_v_height = gst_video_format_get_component_height (format, 2, dh);

  switch (videoflip->method) {
    case GST_VIDEO_FLIP_METHOD_90R:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - x) * src_y_stride + y];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - x) * src_u_stride + y];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - x) * src_v_stride + y];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_90L:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[x * src_y_stride + (src_y_width - 1 - y)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[x * src_u_stride + (src_u_width - 1 - y)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[x * src_v_stride + (src_v_width - 1 - y)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_180:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - y) * src_y_stride + (src_y_width - 1 - x)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - y) * src_u_stride + (src_u_width - 1 - x)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - y) * src_v_stride + (src_v_width - 1 - x)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_HORIZ:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[y * src_y_stride + (src_y_width - 1 - x)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[y * src_u_stride + (src_u_width - 1 - x)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[y * src_v_stride + (src_v_width - 1 - x)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_VERT:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - y) * src_y_stride + x];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - y) * src_u_stride + x];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - y) * src_v_stride + x];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_TRANS:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] = s[x * src_y_stride + y];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] = s[x * src_u_stride + y];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] = s[x * src_v_stride + y];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_OTHER:
      /* Flip Y */
      s = src + src_y_offset;
      d = dest + dest_y_offset;
      for (y = 0; y < dest_y_height; y++) {
        for (x = 0; x < dest_y_width; x++) {
          d[y * dest_y_stride + x] =
              s[(src_y_height - 1 - x) * src_y_stride + (src_y_width - 1 - y)];
        }
      }
      /* Flip U */
      s = src + src_u_offset;
      d = dest + dest_u_offset;
      for (y = 0; y < dest_u_height; y++) {
        for (x = 0; x < dest_u_width; x++) {
          d[y * dest_u_stride + x] =
              s[(src_u_height - 1 - x) * src_u_stride + (src_u_width - 1 - y)];
        }
      }
      /* Flip V */
      s = src + src_v_offset;
      d = dest + dest_v_offset;
      for (y = 0; y < dest_v_height; y++) {
        for (x = 0; x < dest_v_width; x++) {
          d[y * dest_v_stride + x] =
              s[(src_v_height - 1 - x) * src_v_stride + (src_v_width - 1 - y)];
        }
      }
      break;
    case GST_VIDEO_FLIP_METHOD_IDENTITY:
      g_assert_not_reached ();
      break;
    default:
      g_assert_not_reached ();
      break;
  }
}
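Every case above is the same gather loop with a different source index. For 90R, destination pixel (x, y) reads s[(src_height - 1 - x) * src_stride + y], i.e. row src_height - 1 - x, column y of the source; the other methods differ only in that index expression, which is why each runs once per Y, U and V plane with that plane's own width, height and stride.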
Code example #18
bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                         int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer* buffer = nullptr;
  int64_t timestamp, nextTimestamp;
  while (true)
  {
    if (!WaitForDecodedData(&mVideoSinkBufferCount)) {
      mVideoQueue.Finish();
      break;
    }
    mDecoder->NotifyDecodedFrames(0, 1);

    buffer = gst_app_sink_pull_buffer(mVideoAppSink);
    bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DISCONT);
    if ((aKeyFrameSkip && !isKeyframe)) {
      gst_buffer_unref(buffer);
      buffer = nullptr;
      continue;
    }

    timestamp = GST_BUFFER_TIMESTAMP(buffer);
    {
      ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
      timestamp = gst_segment_to_stream_time(&mVideoSegment,
          GST_FORMAT_TIME, timestamp);
    }
    NS_ASSERTION(GST_CLOCK_TIME_IS_VALID(timestamp),
        "frame has invalid timestamp");
    timestamp = nextTimestamp = GST_TIME_AS_USECONDS(timestamp);
    if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
      nextTimestamp += GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
    else if (fpsNum && fpsDen)
      /* add 1-frame duration */
      nextTimestamp += gst_util_uint64_scale(GST_USECOND, fpsNum, fpsDen);

    if (timestamp < aTimeThreshold) {
      LOG(PR_LOG_DEBUG, ("skipping frame %" GST_TIME_FORMAT
            " threshold %" GST_TIME_FORMAT,
            GST_TIME_ARGS(timestamp), GST_TIME_ARGS(aTimeThreshold)));
      gst_buffer_unref(buffer);
      buffer = nullptr;
      continue;
    }

    break;
  }

  if (!buffer)
    /* no more frames */
    return false;

  nsRefPtr<PlanarYCbCrImage> image;
#if GST_VERSION_MICRO >= 36
  const GstStructure* structure = gst_buffer_get_qdata(buffer,
      g_quark_from_string("moz-reader-data"));
  const GValue* value = gst_structure_get_value(structure, "image");
  if (value) {
    BufferData* data = reinterpret_cast<BufferData*>(g_value_get_boxed(value));
    image = data->mImage;
  }
#endif

  if (!image) {
    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fallback to
     * allocating a PlanarYCbCrImage backed GstBuffer here and memcpy.
     */
    GstBuffer* tmp = nullptr;
    AllocateVideoBufferFull(nullptr, GST_BUFFER_OFFSET(buffer),
        GST_BUFFER_SIZE(buffer), nullptr, &tmp, image);

    /* copy */
    gst_buffer_copy_metadata(tmp, buffer, (GstBufferCopyFlags)GST_BUFFER_COPY_ALL);
    memcpy(GST_BUFFER_DATA(tmp), GST_BUFFER_DATA(buffer),
        GST_BUFFER_SIZE(tmp));
    gst_buffer_unref(buffer);
    buffer = tmp;
  }

  guint8* data = GST_BUFFER_DATA(buffer);

  int width = mPicture.width;
  int height = mPicture.height;
  GstVideoFormat format = mFormat;

  VideoData::YCbCrBuffer b;
  for(int i = 0; i < 3; i++) {
    b.mPlanes[i].mData = data + gst_video_format_get_component_offset(format, i,
        width, height);
    b.mPlanes[i].mStride = gst_video_format_get_row_stride(format, i, width);
    b.mPlanes[i].mHeight = gst_video_format_get_component_height(format,
        i, height);
    b.mPlanes[i].mWidth = gst_video_format_get_component_width(format,
        i, width);
    b.mPlanes[i].mOffset = 0;
    b.mPlanes[i].mSkip = 0;
  }

  bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer,
      GST_BUFFER_FLAG_DELTA_UNIT);
  /* XXX ? */
  int64_t offset = 0;
  VideoData* video = VideoData::Create(mInfo, image, offset,
                                       timestamp, nextTimestamp, b,
                                       isKeyframe, -1, mPicture);
  mVideoQueue.Push(video);
  gst_buffer_unref(buffer);

  return true;
}