Example #1
static GstFlowReturn
gst_mpeg2dec_alloc_buffer (GstMpeg2dec * mpeg2dec, GstVideoCodecFrame * frame,
    GstBuffer ** buffer)
{
  GstFlowReturn ret;
  GstVideoFrame vframe;
  guint8 *buf[3];

  ret =
      gst_mpeg2dec_alloc_sized_buf (mpeg2dec, mpeg2dec->decoded_info.size,
      frame, buffer);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto beach;

  if (mpeg2dec->need_cropping && mpeg2dec->has_cropping) {
    GstVideoCropMeta *crop;
    GstVideoCodecState *state;
    GstVideoInfo *vinfo;

    state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (mpeg2dec));
    vinfo = &state->info;

    crop = gst_buffer_add_video_crop_meta (frame->output_buffer);
    /* we can do things slightly more efficiently when we know that
     * downstream understands clipping */
    crop->x = 0;
    crop->y = 0;
    crop->width = vinfo->width;
    crop->height = vinfo->height;

    gst_video_codec_state_unref (state);
  }

  if (!gst_video_frame_map (&vframe, &mpeg2dec->decoded_info, *buffer,
          GST_MAP_READ | GST_MAP_WRITE))
    goto map_fail;

  buf[0] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0);
  buf[1] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 1);
  buf[2] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 2);

  GST_DEBUG_OBJECT (mpeg2dec, "set_buf: %p %p %p, frame %i",
      buf[0], buf[1], buf[2], frame->system_frame_number);

  /* Note: we use a non-NULL 'id' value to make the distinction
   * between the dummy buffers (which have an id of NULL) and the
   * ones we allocated ourselves */
  mpeg2_set_buf (mpeg2dec->decoder, buf,
      GINT_TO_POINTER (frame->system_frame_number + 1));
  gst_mpeg2dec_save_buffer (mpeg2dec, frame->system_frame_number, &vframe);

beach:
  return ret;

map_fail:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Failed to map frame");
    return GST_FLOW_ERROR;
  }
}
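Example #1 maps the buffer with gst_video_frame_map() but deliberately leaves it mapped, handing the frame to gst_mpeg2dec_save_buffer() for later unmapping. For reference, a minimal sketch of the usual map / GST_VIDEO_FRAME_PLANE_DATA / unmap lifecycle; the info and buffer arguments are assumed to come from caps negotiation and the streaming thread:

/* Minimal sketch: map a buffer, touch plane 0, unmap. */
static gboolean
inspect_first_plane (GstVideoInfo * info, GstBuffer * buffer)
{
  GstVideoFrame vframe;
  guint8 *plane0;

  if (!gst_video_frame_map (&vframe, info, buffer, GST_MAP_READ))
    return FALSE;

  plane0 = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0);
  GST_DEBUG ("plane 0 at %p, stride %d", plane0,
      GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0));

  gst_video_frame_unmap (&vframe);
  return TRUE;
}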
Example #2
/* this function does the actual processing
 */
static GstFlowReturn
gst_yuv_to_rgb_transform_frame (GstVideoFilter *filter, GstVideoFrame *in_frame, GstVideoFrame *out_frame)
{
  GstYuvToRgb *rgbtoyuv = GST_YUVTORGB_CAST (filter);
  gint width, height, stride;
  gint y_stride, uv_stride;
  guint32 *out_data;
  guint8 *y_in, *u_in, *v_in;

  y_stride = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0);
  uv_stride = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 1);

  y_in = (guint8*) GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  u_in = (guint8*) GST_VIDEO_FRAME_PLANE_DATA (in_frame, 1);
  v_in = (guint8*) GST_VIDEO_FRAME_PLANE_DATA (in_frame, 2);

  width = GST_VIDEO_FRAME_WIDTH (out_frame);
  height = GST_VIDEO_FRAME_HEIGHT (out_frame);
  stride = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0);

  out_data = (guint32*) GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  // GST_INFO ("DEBUG_INFO: rgbtoyuv::transform_frame: ");
  // GST_INFO ("in stride: %d; out stride: %d %d\n", stride, y_stride, uv_stride);

  libyuv::I420ToARGB (y_in, y_stride,
              u_in, uv_stride,
              v_in, uv_stride,
              (guint8*)out_data, stride,
              width, height);

  return GST_FLOW_OK;
}
Example #3
static void
gst_video_crop_transform_packed_simple (GstVideoCrop * vcrop,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  guint8 *in_data, *out_data;
  gint width, height;
  guint i, dx;
  gint in_stride, out_stride;

  width = GST_VIDEO_FRAME_WIDTH (out_frame);
  height = GST_VIDEO_FRAME_HEIGHT (out_frame);

  in_data = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  out_data = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  in_stride = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0);
  out_stride = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0);

  in_data += vcrop->crop_top * in_stride;
  in_data += vcrop->crop_left * GST_VIDEO_FRAME_COMP_PSTRIDE (in_frame, 0);

  dx = width * GST_VIDEO_FRAME_COMP_PSTRIDE (out_frame, 0);

  for (i = 0; i < height; ++i) {
    memcpy (out_data, in_data, dx);
    in_data += in_stride;
    out_data += out_stride;
  }
}
Example #4
static GstFlowReturn
gst_quarktv_transform_frame (GstVideoFilter * vfilter, GstVideoFrame * in_frame,
                             GstVideoFrame * out_frame)
{
    GstQuarkTV *filter = GST_QUARKTV (vfilter);
    gint area;
    guint32 *src, *dest;
    GstClockTime timestamp;
    GstBuffer **planetable;
    gint planes, current_plane;

    timestamp = GST_BUFFER_TIMESTAMP (in_frame->buffer);
    timestamp =
        gst_segment_to_stream_time (&GST_BASE_TRANSFORM (vfilter)->segment,
                                    GST_FORMAT_TIME, timestamp);

    GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
                      GST_TIME_ARGS (timestamp));

    if (GST_CLOCK_TIME_IS_VALID (timestamp))
        gst_object_sync_values (GST_OBJECT (filter), timestamp);

    if (G_UNLIKELY (filter->planetable == NULL))
        return GST_FLOW_FLUSHING;

    src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
    dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

    GST_OBJECT_LOCK (filter);
    area = filter->area;
    planetable = filter->planetable;
    planes = filter->planes;
    current_plane = filter->current_plane;

    if (planetable[current_plane])
        gst_buffer_unref (planetable[current_plane]);
    planetable[current_plane] = gst_buffer_ref (in_frame->buffer);

    /* For each pixel */
    while (--area) {
        GstBuffer *rand;

        /* pick a random buffer */
        rand = planetable[(current_plane + (fastrand () >> 24)) % planes];

        /* Copy the pixel from the random buffer to dest, FIXME, slow */
        if (rand)
            gst_buffer_extract (rand, area * 4, &dest[area], 4);
        else
            dest[area] = src[area];
    }

    filter->current_plane--;
    if (filter->current_plane < 0)
        filter->current_plane = planes - 1;
    GST_OBJECT_UNLOCK (filter);

    return GST_FLOW_OK;
}
Example #5
static void
copy_field (GstInterlace * interlace, GstBuffer * dest, GstBuffer * src,
    int field_index)
{
  GstVideoInfo *info = &interlace->info;
  gint i, j, n_planes;
  guint8 *d, *s;
  GstVideoFrame dframe, sframe;

  if (!gst_video_frame_map (&dframe, info, dest, GST_MAP_WRITE))
    goto dest_map_failed;

  if (!gst_video_frame_map (&sframe, info, src, GST_MAP_READ))
    goto src_map_failed;

  n_planes = GST_VIDEO_FRAME_N_PLANES (&dframe);

  for (i = 0; i < n_planes; i++) {
    gint cheight, cwidth;
    gint ss, ds;

    d = GST_VIDEO_FRAME_PLANE_DATA (&dframe, i);
    s = GST_VIDEO_FRAME_PLANE_DATA (&sframe, i);

    ds = GST_VIDEO_FRAME_PLANE_STRIDE (&dframe, i);
    ss = GST_VIDEO_FRAME_PLANE_STRIDE (&sframe, i);

    d += field_index * ds;
    s += field_index * ss;

    cheight = GST_VIDEO_FRAME_COMP_HEIGHT (&dframe, i);
    cwidth = MIN (ABS (ss), ABS (ds));

    for (j = field_index; j < cheight; j += 2) {
      memcpy (d, s, cwidth);
      d += ds * 2;
      s += ss * 2;
    }
  }

  gst_video_frame_unmap (&dframe);
  gst_video_frame_unmap (&sframe);
  return;

dest_map_failed:
  {
    GST_ERROR_OBJECT (interlace, "failed to map dest");
    return;
  }
src_map_failed:
  {
    GST_ERROR_OBJECT (interlace, "failed to map src");
    gst_video_frame_unmap (&dframe);
    return;
  }
}
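copy_field() above copies every second line starting at field_index, so a caller weaves a full frame by invoking it once per field. A hypothetical usage (buf_top and buf_bottom are placeholder source buffers):

/* Hypothetical usage: weave two source buffers into one destination. */
copy_field (interlace, dest, buf_top, 0);     /* even lines: 0, 2, 4, ... */
copy_field (interlace, dest, buf_bottom, 1);  /* odd lines: 1, 3, 5, ... */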
Example #6
static void
gst_video_balance_semiplanar_yuv (GstVideoBalance * videobalance,
    GstVideoFrame * frame)
{
  gint x, y;
  guint8 *ydata;
  guint8 *uvdata;
  gint ystride, uvstride;
  gint width, height;
  gint width2, height2;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;
  gint upos, vpos;

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  ydata = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  ystride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);

  for (y = 0; y < height; y++) {
    guint8 *yptr;

    yptr = ydata + y * ystride;
    for (x = 0; x < width; x++) {
      *yptr = tabley[*yptr];
      yptr++;
    }
  }

  width2 = GST_VIDEO_FRAME_COMP_WIDTH (frame, 1);
  height2 = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 1);

  uvdata = GST_VIDEO_FRAME_PLANE_DATA (frame, 1);
  uvstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 1);

  upos = GST_VIDEO_INFO_FORMAT (&frame->info) == GST_VIDEO_FORMAT_NV12 ? 0 : 1;
  vpos = GST_VIDEO_INFO_FORMAT (&frame->info) == GST_VIDEO_FORMAT_NV12 ? 1 : 0;

  for (y = 0; y < height2; y++) {
    guint8 *uvptr;
    guint8 u1, v1;

    uvptr = uvdata + y * uvstride;

    for (x = 0; x < width2; x++) {
      u1 = uvptr[upos];
      v1 = uvptr[vpos];

      uvptr[upos] = tableu[u1][v1];
      uvptr[vpos] = tablev[u1][v1];
      uvptr += 2;
    }
  }
}
Example #7
static void
gst_video_balance_planar_yuv (GstVideoBalance * videobalance,
    GstVideoFrame * frame)
{
  gint x, y;
  guint8 *ydata;
  guint8 *udata, *vdata;
  gint ystride, ustride, vstride;
  gint width, height;
  gint width2, height2;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  ydata = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  ystride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);

  for (y = 0; y < height; y++) {
    guint8 *yptr;

    yptr = ydata + y * ystride;
    for (x = 0; x < width; x++) {
      *yptr = tabley[*yptr];
      yptr++;
    }
  }

  width2 = GST_VIDEO_FRAME_COMP_WIDTH (frame, 1);
  height2 = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 1);

  udata = GST_VIDEO_FRAME_PLANE_DATA (frame, 1);
  vdata = GST_VIDEO_FRAME_PLANE_DATA (frame, 2);
  ustride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 1);
  vstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 2);

  for (y = 0; y < height2; y++) {
    guint8 *uptr, *vptr;
    guint8 u1, v1;

    uptr = udata + y * ustride;
    vptr = vdata + y * vstride;

    for (x = 0; x < width2; x++) {
      u1 = *uptr;
      v1 = *vptr;

      *uptr++ = tableu[u1][v1];
      *vptr++ = tablev[u1][v1];
    }
  }
}
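The indexing in Examples #6 and #7 implies the table layout: tabley is a flat 256-entry LUT, while tableu and tablev are 256 rows of 256 entries addressed by the (u, v) pair. A sketch of filling identity tables in that shape (allocation is assumed to have happened elsewhere):

/* Sketch: identity lookup tables matching the tabley[y] / tableu[u][v] /
 * tablev[u][v] indexing above; identity tables leave the frame untouched. */
static void
fill_identity_tables (guint8 * tabley, guint8 ** tableu, guint8 ** tablev)
{
  gint u, v;

  for (u = 0; u < 256; u++)
    tabley[u] = u;

  for (u = 0; u < 256; u++) {
    for (v = 0; v < 256; v++) {
      tableu[u][v] = u;
      tablev[u][v] = v;
    }
  }
}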
Example #8
static void
gst_video_crop_transform_packed_complex (GstVideoCrop * vcrop,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  guint8 *in_data, *out_data;
  guint i, dx;
  gint width, height;
  gint in_stride;
  gint out_stride;

  width = GST_VIDEO_FRAME_WIDTH (out_frame);
  height = GST_VIDEO_FRAME_HEIGHT (out_frame);

  in_data = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  out_data = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  in_stride = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0);
  out_stride = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0);

  in_data += vcrop->crop_top * in_stride;

  /* rounding down here so we end up at the start of a macro-pixel and not
   * in the middle of one */
  in_data += ROUND_DOWN_2 (vcrop->crop_left) *
      GST_VIDEO_FRAME_COMP_PSTRIDE (in_frame, 0);

  dx = width * GST_VIDEO_FRAME_COMP_PSTRIDE (out_frame, 0);

  /* UYVY = 4:2:2 - [U0 Y0 V0 Y1] [U2 Y2 V2 Y3] [U4 Y4 V4 Y5]
   * YUYV = 4:2:2 - [Y0 U0 Y1 V0] [Y2 U2 Y3 V2] [Y4 U4 Y5 V4] = YUY2 */
  if ((vcrop->crop_left % 2) != 0) {
    for (i = 0; i < height; ++i) {
      gint j;

      memcpy (out_data, in_data, dx);

      /* move just the Y samples one pixel to the left, don't worry about
       * chroma shift */
      for (j = vcrop->macro_y_off; j < out_stride - 2; j += 2)
        out_data[j] = in_data[j + 2];

      in_data += in_stride;
      out_data += out_stride;
    }
  } else {
    for (i = 0; i < height; ++i) {
      memcpy (out_data, in_data, dx);
      in_data += in_stride;
      out_data += out_stride;
    }
  }
}
Example #9
static GstFlowReturn
gst_shagadelictv_transform_frame (GstVideoFilter * vfilter,
                                  GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
    GstShagadelicTV *filter = GST_SHAGADELICTV (vfilter);
    guint32 *src, *dest;
    gint x, y;
    guint32 v;
    guint8 r, g, b;
    gint width, height;

    src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
    dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

    width = GST_VIDEO_FRAME_WIDTH (in_frame);
    height = GST_VIDEO_FRAME_HEIGHT (in_frame);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            v = *src++ | 0x1010100;
            v = (v - 0x707060) & 0x1010100;
            v -= v >> 8;
            /* Try another Babe!
             * v = *src++;
             * *dest++ = v & ((r<<16)|(g<<8)|b);
             */
            r = ((gint8) (filter->ripple[(filter->ry + y) * width * 2 + filter->rx +
                                         x] + filter->phase * 2)) >> 7;
            g = ((gint8) (filter->spiral[y * width + x] + filter->phase * 3)) >> 7;
            b = ((gint8) (filter->ripple[(filter->by + y) * width * 2 + filter->bx +
                                         x] - filter->phase)) >> 7;
            *dest++ = v & ((r << 16) | (g << 8) | b);
        }
    }

    filter->phase -= 8;
    if ((filter->rx + filter->rvx) < 0 || (filter->rx + filter->rvx) >= width)
        filter->rvx = -filter->rvx;
    if ((filter->ry + filter->rvy) < 0 || (filter->ry + filter->rvy) >= height)
        filter->rvy = -filter->rvy;
    if ((filter->bx + filter->bvx) < 0 || (filter->bx + filter->bvx) >= width)
        filter->bvx = -filter->bvx;
    if ((filter->by + filter->bvy) < 0 || (filter->by + filter->bvy) >= height)
        filter->bvy = -filter->bvy;
    filter->rx += filter->rvx;
    filter->ry += filter->rvy;
    filter->bx += filter->bvx;
    filter->by += filter->bvy;

    return GST_FLOW_OK;
}
Example #10
static void
gst_deinterlace_simple_method_deinterlace_frame_planar (GstDeinterlaceMethod *
    method, const GstDeinterlaceField * history, guint history_count,
    GstVideoFrame * outframe, gint cur_field_idx)
{
  GstDeinterlaceSimpleMethod *self = GST_DEINTERLACE_SIMPLE_METHOD (method);
  GstDeinterlaceMethodClass *dm_class = GST_DEINTERLACE_METHOD_GET_CLASS (self);
  guint8 *out;
  const guint8 *field0, *field1, *field2, *fieldp;
  guint cur_field_flags = history[cur_field_idx].flags;
  gint i;
  GstDeinterlaceSimpleMethodFunction copy_scanline;
  GstDeinterlaceSimpleMethodFunction interpolate_scanline;

  g_assert (self->interpolate_scanline_planar[0] != NULL);
  g_assert (self->interpolate_scanline_planar[1] != NULL);
  g_assert (self->interpolate_scanline_planar[2] != NULL);
  g_assert (self->copy_scanline_planar[0] != NULL);
  g_assert (self->copy_scanline_planar[1] != NULL);
  g_assert (self->copy_scanline_planar[2] != NULL);

  for (i = 0; i < 3; i++) {
    copy_scanline = self->copy_scanline_planar[i];
    interpolate_scanline = self->interpolate_scanline_planar[i];

    out = GST_VIDEO_FRAME_PLANE_DATA (outframe, i);

    fieldp = NULL;
    if (cur_field_idx > 0) {
      fieldp = GST_VIDEO_FRAME_PLANE_DATA (history[cur_field_idx - 1].frame, i);
    }

    field0 = GST_VIDEO_FRAME_PLANE_DATA (history[cur_field_idx].frame, i);

    g_assert (dm_class->fields_required <= 4);

    field1 = NULL;
    if (cur_field_idx + 1 < history_count) {
      field1 = GST_VIDEO_FRAME_PLANE_DATA (history[cur_field_idx + 1].frame, i);
    }

    field2 = NULL;
    if (cur_field_idx + 2 < history_count) {
      field2 = GST_VIDEO_FRAME_PLANE_DATA (history[cur_field_idx + 2].frame, i);
    }

    gst_deinterlace_simple_method_deinterlace_frame_planar_plane (self, out,
        field0, field1, field2, fieldp, cur_field_flags, i, copy_scanline,
        interpolate_scanline);
  }
}
Example #11
/* Copy the frame data from the GstBuffer (from decoder)
 * to the picture obtained from downstream in VLC.
 * This function should be avoided as much
 * as possible, since it involves a complete frame copy. */
static void gst_CopyPicture( picture_t *p_pic, GstVideoFrame *p_frame )
{
    int i_plane, i_planes, i_line, i_dst_stride, i_src_stride;
    uint8_t *p_dst, *p_src;
    int i_w, i_h;

    i_planes = p_pic->i_planes;
    for( i_plane = 0; i_plane < i_planes; i_plane++ )
    {
        p_dst = p_pic->p[i_plane].p_pixels;
        p_src = GST_VIDEO_FRAME_PLANE_DATA( p_frame, i_plane );
        i_dst_stride = p_pic->p[i_plane].i_pitch;
        i_src_stride = GST_VIDEO_FRAME_PLANE_STRIDE( p_frame, i_plane );

        i_w = GST_VIDEO_FRAME_COMP_WIDTH( p_frame,
                i_plane ) * GST_VIDEO_FRAME_COMP_PSTRIDE( p_frame, i_plane );
        i_h = GST_VIDEO_FRAME_COMP_HEIGHT( p_frame, i_plane );

        for( i_line = 0;
                i_line < __MIN( p_pic->p[i_plane].i_lines, i_h );
                i_line++ )
        {
            memcpy( p_dst, p_src, i_w );
            p_src += i_src_stride;
            p_dst += i_dst_stride;
        }
    }
}
Example #12
static GstFlowReturn
openni2_read_gstbuffer (GstOpenni2Src * src, GstBuffer * buf)
{
  openni::Status rc = openni::STATUS_OK;
  openni::VideoStream * pStream = src->depth;
  int changedStreamDummy;
  GstVideoFrame vframe;
  uint64_t oni_ts;

  /* Block until we get some data */
  rc = openni::OpenNI::waitForAnyStream (&pStream, 1, &changedStreamDummy,
      SAMPLE_READ_WAIT_TIMEOUT);
  if (rc != openni::STATUS_OK) {
    GST_ERROR_OBJECT (src, "Frame read timeout: %s",
        openni::OpenNI::getExtendedError ());
    return GST_FLOW_ERROR;
  }

  if (src->depth->isValid () && src->color->isValid () &&
      src->sourcetype == SOURCETYPE_BOTH) {
    rc = src->depth->readFrame (src->depthFrame);
    if (rc != openni::STATUS_OK) {
      GST_ERROR_OBJECT (src, "Frame read error: %s",
          openni::OpenNI::getExtendedError ());
      return GST_FLOW_ERROR;
    }
    rc = src->color->readFrame (src->colorFrame);
    if (rc != openni::STATUS_OK) {
      GST_ERROR_OBJECT (src, "Frame read error: %s",
          openni::OpenNI::getExtendedError ());
      return GST_FLOW_ERROR;
    }

    /* Copy colour information */
    gst_video_frame_map (&vframe, &src->info, buf, GST_MAP_WRITE);

    guint8 *pData = (guint8 *) GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0);
    guint8 *pColor = (guint8 *) src->colorFrame->getData ();
    /* Add depth as 8bit alpha channel, depth is 16bit samples. */
    guint16 *pDepth = (guint16 *) src->depthFrame->getData ();

    for (int i = 0; i < src->colorFrame->getHeight (); ++i) {
      for (int j = 0; j < src->colorFrame->getWidth (); ++j) {
        pData[4 * j + 0] = pColor[3 * j + 0];
        pData[4 * j + 1] = pColor[3 * j + 1];
        pData[4 * j + 2] = pColor[3 * j + 2];
        pData[4 * j + 3] = pDepth[j] >> 8;
      }
      pData += GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
      pColor += src->colorFrame->getStrideInBytes ();
      pDepth += src->depthFrame->getStrideInBytes () / 2;
    }
    gst_video_frame_unmap (&vframe);

    oni_ts = src->colorFrame->getTimestamp () * 1000;

    GST_LOG_OBJECT (src, "sending buffer (%d+%d)B",
        src->colorFrame->getDataSize (),
        src->depthFrame->getDataSize ());
  } else if (src->depth->isValid () && src->sourcetype == SOURCETYPE_DEPTH) {
Example #13
static void
fill_image_packed8_3 (opj_image_t * image, GstVideoFrame * frame)
{
  gint x, y, w, h;
  const guint8 *data_in, *tmp;
  gint *data_out[3];
  gint sstride;

  w = GST_VIDEO_FRAME_WIDTH (frame);
  h = GST_VIDEO_FRAME_HEIGHT (frame);
  data_in = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  sstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);

  data_out[0] = image->comps[0].data;
  data_out[1] = image->comps[1].data;
  data_out[2] = image->comps[2].data;

  for (y = 0; y < h; y++) {
    tmp = data_in;

    for (x = 0; x < w; x++) {
      *data_out[0] = tmp[1];
      *data_out[1] = tmp[2];
      *data_out[2] = tmp[3];

      tmp += 4;
      data_out[0]++;
      data_out[1]++;
      data_out[2]++;
    }
    data_in += sstride;
  }
}
Example #14
static void
fill_frame_planar16_1 (GstVideoFrame * frame, opj_image_t * image)
{
  gint x, y, w, h;
  guint16 *data_out, *tmp;
  const gint *data_in;
  gint dstride;
  gint shift;

  w = GST_VIDEO_FRAME_WIDTH (frame);
  h = GST_VIDEO_FRAME_HEIGHT (frame);
  data_out = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  dstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0) / 2;

  data_in = image->comps[0].data;

  shift = 16 - image->comps[0].prec;

  for (y = 0; y < h; y++) {
    tmp = data_out;

    for (x = 0; x < w; x++) {
      *tmp = *data_in << shift;

      tmp++;
      data_in++;
    }
    data_out += dstride;
  }
}
Example #15
static void
fill_frame_packed8_3 (GstVideoFrame * frame, opj_image_t * image)
{
  gint x, y, w, h;
  guint8 *data_out, *tmp;
  const gint *data_in[3];
  gint dstride;

  w = GST_VIDEO_FRAME_WIDTH (frame);
  h = GST_VIDEO_FRAME_HEIGHT (frame);
  data_out = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  dstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);

  data_in[0] = image->comps[0].data;
  data_in[1] = image->comps[1].data;
  data_in[2] = image->comps[2].data;

  for (y = 0; y < h; y++) {
    tmp = data_out;

    for (x = 0; x < w; x++) {
      tmp[1] = *data_in[0];
      tmp[2] = *data_in[1];
      tmp[3] = *data_in[2];

      tmp += 4;
      data_in[0]++;
      data_in[1]++;
      data_in[2]++;
    }
    data_out += dstride;
  }
}
Example #16
static GstFlowReturn
gst_cacasink_render (GstBaseSink * basesink, GstBuffer * buffer)
{
  GstCACASink *cacasink = GST_CACASINK (basesink);
  GstVideoFrame frame;

  GST_DEBUG ("render");

  if (!gst_video_frame_map (&frame, &cacasink->info, buffer, GST_MAP_READ))
    goto invalid_frame;

  caca_clear ();
  caca_draw_bitmap (0, 0, cacasink->screen_width - 1,
      cacasink->screen_height - 1, cacasink->bitmap,
      GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
  caca_refresh ();

  gst_video_frame_unmap (&frame);

  return GST_FLOW_OK;

  /* ERRORS */
invalid_frame:
  {
    GST_ERROR_OBJECT (cacasink, "invalid frame received");
    return GST_FLOW_ERROR;
  }
}
Example #17
static void
transform_ayuv_ayuv (GstVideoFrame * frame, const gint * matrix)
{
  guint8 *data;
  gsize size;
  gint y, u, v;
  gint yc[4];
  gint uc[4];
  gint vc[4];

  if (matrix == NULL)
    return;

  data = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  size = GST_VIDEO_FRAME_SIZE (frame);

  memcpy (yc, matrix, 4 * sizeof (gint));
  memcpy (uc, matrix + 4, 4 * sizeof (gint));
  memcpy (vc, matrix + 8, 4 * sizeof (gint));

  while (size > 0) {
    y = (data[1] * yc[0] + data[2] * yc[1] + data[3] * yc[2] + yc[3]) >> 8;
    u = (data[1] * uc[0] + data[2] * uc[1] + data[3] * uc[2] + uc[3]) >> 8;
    v = (data[1] * vc[0] + data[2] * vc[1] + data[3] * vc[2] + vc[3]) >> 8;

    data[1] = y;
    data[2] = u;
    data[3] = v;

    data += 4;
    size -= 4;
  }
}
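transform_ayuv_ayuv() reads matrix as a row-major 3x4 fixed-point transform with 8 fractional bits (256 represents 1.0), applied to the vector (Y, U, V, 1) while byte 0 (alpha) is left untouched. Under that reading, an identity matrix looks like this:

/* Identity matrix for transform_ayuv_ayuv under the 8.8 fixed-point
 * convention above; the fourth column is a pre-shift constant offset.
 * transform_ayuv_ayuv (&frame, identity) would leave the frame unchanged. */
static const gint identity[12] = {
  256,   0,   0,   0,   /* Y' = Y */
    0, 256,   0,   0,   /* U' = U */
    0,   0, 256,   0,   /* V' = V */
};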
Example #18
static void
gst_video_balance_packed_rgb (GstVideoBalance * videobalance,
    GstVideoFrame * frame)
{
  gint i, j, height;
  gint width, stride, row_wrap;
  gint pixel_stride;
  guint8 *data;
  gint offsets[3];
  gint r, g, b;
  gint y, u, v;
  gint u_tmp, v_tmp;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  offsets[0] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
  offsets[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
  offsets[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);

  data = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);

  pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
  row_wrap = stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];

      y = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 0, r, g, b);
      u_tmp = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 1, r, g, b);
      v_tmp = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 2, r, g, b);

      y = CLAMP (y, 0, 255);
      u_tmp = CLAMP (u_tmp, 0, 255);
      v_tmp = CLAMP (v_tmp, 0, 255);

      y = tabley[y];
      u = tableu[u_tmp][v_tmp];
      v = tablev[u_tmp][v_tmp];

      r = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 0, y, u, v);
      g = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 1, y, u, v);
      b = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 2, y, u, v);

      data[offsets[0]] = CLAMP (r, 0, 255);
      data[offsets[1]] = CLAMP (g, 0, 255);
      data[offsets[2]] = CLAMP (b, 0, 255);
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
Example #19
static GstFlowReturn
gst_streaktv_transform_frame (GstVideoFilter * vfilter,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  GstStreakTV *filter = GST_STREAKTV (vfilter);
  guint32 *src, *dest;
  gint i, cf;
  gint video_area, width, height;
  guint32 **planetable = filter->planetable;
  gint plane = filter->plane;
  guint stride_mask, stride_shift, stride;

  src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  width = GST_VIDEO_FRAME_WIDTH (in_frame);
  height = GST_VIDEO_FRAME_HEIGHT (in_frame);

  video_area = width * height;

  GST_OBJECT_LOCK (filter);
  if (filter->feedback) {
    stride_mask = 0xfcfcfcfc;
    stride = 8;
    stride_shift = 2;
  } else {
    stride_mask = 0xf8f8f8f8;
    stride = 4;
    stride_shift = 3;
  }

  for (i = 0; i < video_area; i++) {
    planetable[plane][i] = (src[i] & stride_mask) >> stride_shift;
  }

  cf = plane & (stride - 1);
  if (filter->feedback) {
    for (i = 0; i < video_area; i++) {
      dest[i] = planetable[cf][i]
          + planetable[cf + stride][i]
          + planetable[cf + stride * 2][i]
          + planetable[cf + stride * 3][i];
      planetable[plane][i] = (dest[i] & stride_mask) >> stride_shift;
    }
  } else {
    for (i = 0; i < video_area; i++) {
      /* non-feedback mode stored values divided by 8 above, so summing
       * eight planes restores full scale */
      dest[i] = planetable[cf][i]
          + planetable[cf + stride][i]
          + planetable[cf + stride * 2][i]
          + planetable[cf + stride * 3][i]
          + planetable[cf + stride * 4][i]
          + planetable[cf + stride * 5][i]
          + planetable[cf + stride * 6][i]
          + planetable[cf + stride * 7][i];
    }
  }

  filter->plane = (plane + 1) & 31;     /* 32 planes kept, per the indexing above */
  GST_OBJECT_UNLOCK (filter);

  return GST_FLOW_OK;
}
Example #20
static void
gst_smpte_alpha_process_ayuv_ayuv (GstSMPTEAlpha * smpte,
    const GstVideoFrame * in_frame, GstVideoFrame * out_frame, GstMask * mask,
    gint border, gint pos)
{
  gint i, j;
  const guint32 *maskp;
  gint value;
  gint min, max;
  gint width, height;
  guint8 *in, *out;
  gint src_wrap, dest_wrap;

  if (border == 0)
    border++;

  min = pos - border;
  max = pos;
  GST_DEBUG_OBJECT (smpte, "pos %d, min %d, max %d, border %d", pos, min, max,
      border);

  maskp = mask->data;

  width = GST_VIDEO_FRAME_WIDTH (out_frame);
  height = GST_VIDEO_FRAME_HEIGHT (out_frame);

  in = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  out = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);
  src_wrap = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0) - (width << 2);
  dest_wrap = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0) - (width << 2);

  /* we basically copy the source to dest but we scale the alpha channel with
   * the mask */
  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      value = *maskp++;
      *out++ = (*in++ * ((CLAMP (value, min, max) - min) << 8) / border) >> 8;
      *out++ = *in++;
      *out++ = *in++;
      *out++ = *in++;
    }
    in += src_wrap;
    out += dest_wrap;
  }
}
Example #21
static void gub_copy_texture_d3d9(GUBGraphicContextD3D9 *gcontext, GstVideoInfo *video_info, GstBuffer *buffer, void *native_texture_ptr)
{
    static const GUID GUB_IID_IDirect3DTexture9 = { 0x85c31227, 0x3de5, 0x4f00, 0x9b, 0x3a, 0xf1, 0x1a, 0xc3, 0x8c, 0x18, 0xb5 };

    if (native_texture_ptr)
    {
        void *d3d_interface;
        IDirect3DTexture9* d3dtex = (IDirect3DTexture9*)native_texture_ptr;
        D3DLOCKED_RECT lr;
        GstVideoFrame video_frame;

        if (d3dtex->lpVtbl->QueryInterface(d3dtex, &GUB_IID_IDirect3DTexture9, &d3d_interface) != S_OK) {
            // This is not D3D9; we are probably inside the Editor in D3D11 mode and assumed wrongly.
            gub_log("I assumed this was D3D9 but it is not. Are you using the Unity Editor without -force-d3d9?");
            return;
        }
        d3dtex->lpVtbl->Release(d3dtex);
        if (d3dtex->lpVtbl->LockRect(d3dtex, 0, &lr, NULL, D3DLOCK_DISCARD) != D3D_OK)
            gub_log("Problem locking D3D texture");
        gst_video_frame_map(&video_frame, video_info, buffer, GST_MAP_READ);
        if (gcontext->crop_left == 0 && gcontext->crop_top == 0 && gcontext->crop_right == 0 && gcontext->crop_bottom == 0) {
            // No cropping
            memcpy((char*)lr.pBits, GST_VIDEO_FRAME_PLANE_DATA(&video_frame, 0), video_info->width * video_info->height * 4);
        }
        else {
            // Cropping
            int left = (int)(video_info->width  * gcontext->crop_left);
            int top = (int)(video_info->height * gcontext->crop_top);
            int width = (int)(video_info->width  * (1 - gcontext->crop_left - gcontext->crop_right));
            int height = (int)(video_info->height * (1 - gcontext->crop_top - gcontext->crop_bottom));
            char *dst_ptr = (char*)lr.pBits;
            char *src_ptr = (char *)GST_VIDEO_FRAME_PLANE_DATA(&video_frame, 0) + (top * video_info->width + left) * 4;
            int y;

            for (y = 0; y < height; y++, dst_ptr += lr.Pitch, src_ptr += video_info->width * 4) {
                memcpy(dst_ptr, src_ptr, width * 4);
            }
        }
        gst_video_frame_unmap(&video_frame);
        if (d3dtex->lpVtbl->UnlockRect(d3dtex, 0) != D3D_OK)
            gub_log("Problem unlocking D3D texture");
    }
}
Example #22
static void
gst_color_effects_transform_rgb (GstColorEffects * filter,
    GstVideoFrame * frame)
{
  gint i, j;
  gint width, height;
  gint pixel_stride, row_stride, row_wrap;
  guint32 r, g, b;
  guint32 luma;
  gint offsets[3];
  guint8 *data;

  data = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  offsets[0] = GST_VIDEO_FRAME_COMP_POFFSET (frame, 0);
  offsets[1] = GST_VIDEO_FRAME_COMP_POFFSET (frame, 1);
  offsets[2] = GST_VIDEO_FRAME_COMP_POFFSET (frame, 2);

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  row_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
  pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
  row_wrap = row_stride - pixel_stride * width;

  /* transform */

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];
      if (filter->map_luma) {
        /* BT. 709 coefficients in B8 fixed point */
        /* 0.2126 R + 0.7152 G + 0.0722 B */
        luma = ((r << 8) * 54) + ((g << 8) * 183) + ((b << 8) * 19);
        luma >>= 16;            /* get integer part */
        luma *= 3;              /* times 3 to retrieve the correct pixel from
                                 * the lut */
        /* map luma to lookup table */
        /* src.luma |-> table[luma].rgb */
        data[offsets[0]] = filter->table[luma];
        data[offsets[1]] = filter->table[luma + 1];
        data[offsets[2]] = filter->table[luma + 2];
      } else {
        /* map each color component to the correspondent lut color */
        /* src.r |-> table[r].r */
        /* src.g |-> table[g].g */
        /* src.b |-> table[b].b */
        data[offsets[0]] = filter->table[r * 3];
        data[offsets[1]] = filter->table[g * 3 + 1];
        data[offsets[2]] = filter->table[b * 3 + 2];
      }
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
Example #23
static void
gst_video_crop_transform_semi_planar (GstVideoCrop * vcrop,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  gint width, height;
  guint8 *y_out, *uv_out;
  guint8 *y_in, *uv_in;
  guint i, dx;

  width = GST_VIDEO_FRAME_WIDTH (out_frame);
  height = GST_VIDEO_FRAME_HEIGHT (out_frame);

  /* Y plane */
  y_in = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  y_out = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  /* UV plane */
  uv_in = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 1);
  uv_out = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 1);

  y_in += vcrop->crop_top * GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0) +
      vcrop->crop_left;
  dx = width;

  for (i = 0; i < height; ++i) {
    memcpy (y_out, y_in, dx);
    y_in += GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0);
    y_out += GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0);
  }

  uv_in += (vcrop->crop_top / 2) * GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 1);
  uv_in += GST_ROUND_DOWN_2 (vcrop->crop_left);
  dx = GST_ROUND_UP_2 (width);

  for (i = 0; i < GST_ROUND_UP_2 (height) / 2; i++) {
    memcpy (uv_out, uv_in, dx);
    uv_in += GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 1);
    uv_out += GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 1);
  }
}
Example #24
static GstFlowReturn
gst_cairo_overlay_transform_frame_ip (GstVideoFilter * vfilter,
    GstVideoFrame * frame)
{
  GstCairoOverlay *overlay = GST_CAIRO_OVERLAY (vfilter);
  cairo_surface_t *surface;
  cairo_t *cr;
  cairo_format_t format;

  switch (GST_VIDEO_FRAME_FORMAT (frame)) {
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_BGRA:
      format = CAIRO_FORMAT_ARGB32;
      break;
    case GST_VIDEO_FORMAT_xRGB:
    case GST_VIDEO_FORMAT_BGRx:
      format = CAIRO_FORMAT_RGB24;
      break;
    case GST_VIDEO_FORMAT_RGB16:
      format = CAIRO_FORMAT_RGB16_565;
      break;
    default:
    {
      GST_WARNING ("No matching cairo format for %s",
          gst_video_format_to_string (GST_VIDEO_FRAME_FORMAT (frame)));
      return GST_FLOW_ERROR;
    }
  }

  surface =
      cairo_image_surface_create_for_data (GST_VIDEO_FRAME_PLANE_DATA (frame,
          0), format, GST_VIDEO_FRAME_WIDTH (frame),
      GST_VIDEO_FRAME_HEIGHT (frame), GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0));

  if (G_UNLIKELY (!surface))
    return GST_FLOW_ERROR;

  cr = cairo_create (surface);
  if (G_UNLIKELY (!cr)) {
    cairo_surface_destroy (surface);
    return GST_FLOW_ERROR;
  }

  g_signal_emit (overlay, gst_cairo_overlay_signals[SIGNAL_DRAW], 0,
      cr, GST_BUFFER_PTS (frame->buffer), GST_BUFFER_DURATION (frame->buffer),
      NULL);

  cairo_destroy (cr);
  cairo_surface_destroy (surface);

  return GST_FLOW_OK;
}
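The "draw" signal emitted above hands application code a ready-to-use cairo context that paints directly into the mapped frame. A minimal handler sketch (the signature matches cairooverlay's documented draw signal; the rectangle is arbitrary):

/* Sketch: handler for cairooverlay's "draw" signal. */
static void
on_draw (GstElement * overlay, cairo_t * cr, guint64 timestamp,
    guint64 duration, gpointer user_data)
{
  cairo_set_source_rgba (cr, 0.9, 0.0, 0.1, 0.7);
  cairo_rectangle (cr, 10, 10, 100, 50);
  cairo_fill (cr);
}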
Example #25
/* chain function
 * this function does the actual processing
 */
static GstFlowReturn
gst_x265_enc_handle_frame (GstVideoEncoder * video_enc,
    GstVideoCodecFrame * frame)
{
  GstX265Enc *encoder = GST_X265_ENC (video_enc);
  GstVideoInfo *info = &encoder->input_state->info;
  GstFlowReturn ret;
  x265_picture pic_in;
  guint32 i_nal, i;
  FrameData *fdata;
  gint nplanes = 0;

  if (G_UNLIKELY (encoder->x265enc == NULL))
    goto not_inited;

  /* set up input picture */
  x265_picture_init (&encoder->x265param, &pic_in);

  fdata = gst_x265_enc_queue_frame (encoder, frame, info);
  if (!fdata)
    goto invalid_frame;

  pic_in.colorSpace =
      gst_x265_enc_gst_to_x265_video_format (info->finfo->format, &nplanes);
  for (i = 0; i < nplanes; i++) {
    pic_in.planes[i] = GST_VIDEO_FRAME_PLANE_DATA (&fdata->vframe, i);
    pic_in.stride[i] = GST_VIDEO_FRAME_COMP_STRIDE (&fdata->vframe, i);
  }

  pic_in.sliceType = X265_TYPE_AUTO;
  pic_in.pts = frame->pts;
  pic_in.dts = frame->dts;
  pic_in.bitDepth = info->finfo->depth[0];
  pic_in.userData = GINT_TO_POINTER (frame->system_frame_number);

  ret = gst_x265_enc_encode_frame (encoder, &pic_in, frame, &i_nal, TRUE);

  /* input buffer is released later on */
  return ret;

/* ERRORS */
not_inited:
  {
    GST_WARNING_OBJECT (encoder, "Got buffer before set_caps was called");
    return GST_FLOW_NOT_NEGOTIATED;
  }
invalid_frame:
  {
    GST_ERROR_OBJECT (encoder, "Failed to map frame");
    return GST_FLOW_ERROR;
  }
}
Example #26
/* Actual processing. */
static GstFlowReturn
gst_burn_transform_frame (GstVideoFilter * vfilter,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  GstBurn *filter = GST_BURN (vfilter);
  gint video_size, adjustment, width, height;
  guint32 *src, *dest;
  GstClockTime timestamp;
  gint64 stream_time;

  src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  width = GST_VIDEO_FRAME_WIDTH (in_frame);
  height = GST_VIDEO_FRAME_HEIGHT (in_frame);

  video_size = width * height;

  /* GstController: update the properties */
  timestamp = GST_BUFFER_TIMESTAMP (in_frame->buffer);
  stream_time =
      gst_segment_to_stream_time (&GST_BASE_TRANSFORM (filter)->segment,
      GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (filter), stream_time);

  GST_OBJECT_LOCK (filter);
  adjustment = filter->adjustment;
  GST_OBJECT_UNLOCK (filter);

  /*** Now the image processing work.... ***/
  orc_gaudi_burn (dest, src, adjustment, video_size);

  return GST_FLOW_OK;
}
Example #27
static void
gst_gamma_packed_rgb_ip (GstGamma * gamma, GstVideoFrame * frame)
{
  gint i, j, height;
  gint width, stride, row_wrap;
  gint pixel_stride;
  const guint8 *table = gamma->gamma_table;
  gint offsets[3];
  gint r, g, b;
  gint y, u, v;
  guint8 *data;

  data = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
  width = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
  height = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);

  offsets[0] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
  offsets[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
  offsets[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);

  pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
  row_wrap = stride - pixel_stride * width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      r = data[offsets[0]];
      g = data[offsets[1]];
      b = data[offsets[2]];

      y = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 0, r, g, b);
      u = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 1, r, g, b);
      v = APPLY_MATRIX (cog_rgb_to_ycbcr_matrix_8bit_sdtv, 2, r, g, b);

      y = table[CLAMP (y, 0, 255)];
      r = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 0, y, u, v);
      g = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 1, y, u, v);
      b = APPLY_MATRIX (cog_ycbcr_to_rgb_matrix_8bit_sdtv, 2, y, u, v);

      data[offsets[0]] = CLAMP (r, 0, 255);
      data[offsets[1]] = CLAMP (g, 0, 255);
      data[offsets[2]] = CLAMP (b, 0, 255);
      data += pixel_stride;
    }
    data += row_wrap;
  }
}
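gamma->gamma_table above is a precomputed 256-entry LUT applied only to the luma value. A typical construction (a sketch, not necessarily this element's exact rounding) raises normalized luma to the power 1/gamma:

#include <math.h>

/* Sketch: fill a 256-entry gamma LUT; gamma == 1.0 gives identity. */
static void
fill_gamma_table (guint8 * table, gdouble gamma)
{
  gint i;

  for (i = 0; i < 256; i++) {
    gdouble v = pow (i / 255.0, 1.0 / gamma) * 255.0;
    table[i] = (guint8) CLAMP (v + 0.5, 0.0, 255.0);
  }
}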
Example #28
static void
fill_frame_planar16_4_generic (GstVideoFrame * frame, opj_image_t * image)
{
  gint x, y, w, h;
  guint16 *data_out, *tmp;
  const gint *data_in[4];
  gint dstride;
  gint dx[4], dy[4], shift[4];

  w = GST_VIDEO_FRAME_WIDTH (frame);
  h = GST_VIDEO_FRAME_HEIGHT (frame);
  data_out = (guint16 *) GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  dstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0) / 2;

  data_in[0] = image->comps[0].data;
  data_in[1] = image->comps[1].data;
  data_in[2] = image->comps[2].data;
  data_in[3] = image->comps[3].data;

  dx[0] = image->comps[0].dx;
  dx[1] = image->comps[1].dx;
  dx[2] = image->comps[2].dx;
  dx[3] = image->comps[3].dx;

  dy[0] = image->comps[0].dy;
  dy[1] = image->comps[1].dy;
  dy[2] = image->comps[2].dy;
  dy[3] = image->comps[3].dy;

  shift[0] = 16 - image->comps[0].prec;
  shift[1] = 16 - image->comps[1].prec;
  shift[2] = 16 - image->comps[2].prec;
  shift[3] = 16 - image->comps[3].prec;

  for (y = 0; y < h; y++) {
    tmp = data_out;

    for (x = 0; x < w; x++) {
      tmp[0] = data_in[3][((y / dy[3]) * w + x) / dx[3]] << shift[3];
      tmp[1] = data_in[0][((y / dy[0]) * w + x) / dx[0]] << shift[0];
      tmp[2] = data_in[1][((y / dy[1]) * w + x) / dx[1]] << shift[1];
      tmp[3] = data_in[2][((y / dy[2]) * w + x) / dx[2]] << shift[2];
      tmp += 4;
    }
    data_out += dstride;
  }
}
Example #29
static gboolean
gst_space_scope_render (GstAudioVisualizer * base, GstBuffer * audio,
                        GstVideoFrame * video)
{
    GstSpaceScope *scope = GST_SPACE_SCOPE (base);
    GstMapInfo amap;
    guint num_samples;

    gst_buffer_map (audio, &amap, GST_MAP_READ);

    num_samples =
        amap.size / (GST_AUDIO_INFO_CHANNELS (&base->ainfo) * sizeof (gint16));
    scope->process (base, (guint32 *) GST_VIDEO_FRAME_PLANE_DATA (video, 0),
                    (gint16 *) amap.data, num_samples);
    gst_buffer_unmap (audio, &amap);
    return TRUE;
}
Example #30
PassRefPtr<BitmapTexture> MediaPlayerPrivateGStreamerBase::updateTexture(TextureMapper* textureMapper)
{
    WTF::GMutexLocker<GMutex> lock(m_sampleMutex);
    if (!GST_IS_SAMPLE(m_sample.get()))
        return nullptr;

    GstCaps* caps = gst_sample_get_caps(m_sample.get());
    if (!caps)
        return nullptr;

    GstVideoInfo videoInfo;
    gst_video_info_init(&videoInfo);
    if (!gst_video_info_from_caps(&videoInfo, caps))
        return nullptr;

    IntSize size = IntSize(GST_VIDEO_INFO_WIDTH(&videoInfo), GST_VIDEO_INFO_HEIGHT(&videoInfo));
    RefPtr<BitmapTexture> texture = textureMapper->acquireTextureFromPool(size, GST_VIDEO_INFO_HAS_ALPHA(&videoInfo) ? BitmapTexture::SupportsAlpha : BitmapTexture::NoFlag);
    GstBuffer* buffer = gst_sample_get_buffer(m_sample.get());

#if GST_CHECK_VERSION(1, 1, 0)
    GstVideoGLTextureUploadMeta* meta;
    if ((meta = gst_buffer_get_video_gl_texture_upload_meta(buffer))) {
        if (meta->n_textures == 1) { // BGRx & BGRA formats use only one texture.
            const BitmapTextureGL* textureGL = static_cast<const BitmapTextureGL*>(texture.get());
            guint ids[4] = { textureGL->id(), 0, 0, 0 };

            if (gst_video_gl_texture_upload_meta_upload(meta, ids))
                return texture;
        }
    }
#endif

    // Right now the TextureMapper only supports chromas with one plane
    ASSERT(GST_VIDEO_INFO_N_PLANES(&videoInfo) == 1);

    GstVideoFrame videoFrame;
    if (!gst_video_frame_map(&videoFrame, &videoInfo, buffer, GST_MAP_READ))
        return nullptr;

    int stride = GST_VIDEO_FRAME_PLANE_STRIDE(&videoFrame, 0);
    const void* srcData = GST_VIDEO_FRAME_PLANE_DATA(&videoFrame, 0);
    texture->updateContents(srcData, WebCore::IntRect(WebCore::IntPoint(0, 0), size), WebCore::IntPoint(0, 0), stride, BitmapTexture::UpdateCannotModifyOriginalImageData);
    gst_video_frame_unmap(&videoFrame);

    return texture;
}