static GstCaps *
typefind_data (const guint8 * data, gsize data_size,
    GstTypeFindProbability * prob)
{
  GstBuffer *buf;
  GstCaps *caps;

  GST_MEMDUMP ("typefind data", data, data_size);
  buf = gst_buffer_new ();
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
          (guint8 *) data, data_size, 0, data_size, NULL, NULL));
  GST_BUFFER_OFFSET (buf) = 0;

  caps = gst_type_find_helper_for_buffer (NULL, buf, prob);
  GST_INFO ("caps: %" GST_PTR_FORMAT ", probability=%u", caps, *prob);

  gst_buffer_unref (buf);

  return caps;
}
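One way the typefind helper above might be driven is sketched below; the probe_file name and the use of g_file_get_contents to slurp the whole file are illustrative assumptions, not part of the original snippet.

static void
probe_file (const gchar * filename)
{
  GstTypeFindProbability prob = GST_TYPE_FIND_NONE;
  gchar *contents = NULL;
  gsize length = 0;
  GstCaps *caps;

  if (!g_file_get_contents (filename, &contents, &length, NULL))
    return;

  caps = typefind_data ((const guint8 *) contents, length, &prob);
  if (caps != NULL) {
    gchar *s = gst_caps_to_string (caps);

    g_print ("%s: %s (probability %u)\n", filename, s, prob);
    g_free (s);
    gst_caps_unref (caps);
  }

  g_free (contents);
}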
static gboolean
gst_core_media_buffer_wrap_block_buffer (GstBuffer * buf,
    CMBlockBufferRef block_buf)
{
  OSStatus status;
  gchar *data = NULL;
  UInt32 size;

  status = CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
  if (status != noErr) {
    return FALSE;
  }

  size = CMBlockBufferGetDataLength (block_buf);

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return TRUE;
}
static GstFlowReturn gst_imx_v4l2_buffer_pool_acquire_buffer(GstBufferPool *bpool, GstBuffer **buffer, G_GNUC_UNUSED GstBufferPoolAcquireParams *params)
{
	GstImxV4l2BufferPool *pool = GST_IMX_V4L2_BUFFER_POOL(bpool);
	struct v4l2_buffer vbuffer;
	GstBuffer *buf;
	GstImxV4l2Meta *meta;

	if (GST_BUFFER_POOL_IS_FLUSHING(bpool))
		return GST_FLOW_FLUSHING;

	memset(&vbuffer, 0, sizeof(vbuffer));
	vbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vbuffer.memory = V4L2_MEMORY_MMAP;

	if (ioctl(GST_IMX_FD_OBJECT_GET_FD(pool->fd_obj_v4l), VIDIOC_DQBUF, &vbuffer) < 0)
	{
		GST_ERROR_OBJECT(pool, "VIDIOC_DQBUF failed: %s", g_strerror(errno));
		return GST_FLOW_ERROR;
	}
	buf = pool->buffers[vbuffer.index];
	GST_DEBUG_OBJECT(pool, "dqbuf %u %p", vbuffer.index, (gpointer)buf);
	pool->buffers[vbuffer.index] = NULL;

	g_assert(buf);

	meta = GST_IMX_V4L2_META_GET(buf);
	g_assert(meta);

	gst_buffer_remove_all_memory(buf);
	gst_buffer_append_memory(buf,
			gst_memory_new_wrapped(0,
				meta->mem, meta->vbuffer.length, 0,
				vbuffer.bytesused, NULL, NULL));

	GST_BUFFER_TIMESTAMP(buf) = GST_TIMEVAL_TO_TIME(vbuffer.timestamp);

	*buffer = buf;

	return GST_FLOW_OK;
}
static GstFlowReturn
gst_v4l2_buffer_pool_alloc_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
    GstBufferPoolAcquireParams * params)
{
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
  GstBuffer *newbuf;
  GstV4l2Meta *meta;
  GstV4l2Object *obj;
  GstVideoInfo *info;
  guint index;

  obj = pool->obj;
  info = &obj->info;

  switch (obj->mode) {
    case GST_V4L2_IO_RW:
    {
      newbuf =
          gst_buffer_new_allocate (pool->allocator, pool->size, &pool->params);
      break;
    }
    case GST_V4L2_IO_MMAP:
    {
      newbuf = gst_buffer_new ();
      meta = GST_V4L2_META_ADD (newbuf);

      index = pool->num_allocated;

      GST_LOG_OBJECT (pool, "creating buffer %u, %p", index, newbuf);

      meta->vbuffer.index = index;
      meta->vbuffer.type = obj->type;
      meta->vbuffer.memory = V4L2_MEMORY_MMAP;

      if (v4l2_ioctl (pool->video_fd, VIDIOC_QUERYBUF, &meta->vbuffer) < 0)
        goto querybuf_failed;

      GST_LOG_OBJECT (pool, "  index:     %u", meta->vbuffer.index);
      GST_LOG_OBJECT (pool, "  type:      %d", meta->vbuffer.type);
      GST_LOG_OBJECT (pool, "  bytesused: %u", meta->vbuffer.bytesused);
      GST_LOG_OBJECT (pool, "  flags:     %08x", meta->vbuffer.flags);
      GST_LOG_OBJECT (pool, "  field:     %d", meta->vbuffer.field);
      GST_LOG_OBJECT (pool, "  memory:    %d", meta->vbuffer.memory);
      if (meta->vbuffer.memory == V4L2_MEMORY_MMAP)
        GST_LOG_OBJECT (pool, "  MMAP offset:  %u", meta->vbuffer.m.offset);
      GST_LOG_OBJECT (pool, "  length:    %u", meta->vbuffer.length);
      GST_LOG_OBJECT (pool, "  input:     %u", meta->vbuffer.input);

      meta->mem = v4l2_mmap (0, meta->vbuffer.length,
          PROT_READ | PROT_WRITE, MAP_SHARED, pool->video_fd,
          meta->vbuffer.m.offset);
      if (meta->mem == MAP_FAILED)
        goto mmap_failed;

      gst_buffer_append_memory (newbuf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              meta->mem, meta->vbuffer.length, 0, meta->vbuffer.length, NULL,
              NULL));

      /* add metadata to raw video buffers */
      if (pool->add_videometa && info->finfo) {
        gsize offset[GST_VIDEO_MAX_PLANES];
        gint stride[GST_VIDEO_MAX_PLANES];

        offset[0] = 0;
        stride[0] = obj->bytesperline;

        GST_DEBUG_OBJECT (pool, "adding video meta, stride %d", stride[0]);
        gst_buffer_add_video_meta_full (newbuf, GST_VIDEO_FRAME_FLAG_NONE,
            GST_VIDEO_INFO_FORMAT (info), GST_VIDEO_INFO_WIDTH (info),
            GST_VIDEO_INFO_HEIGHT (info), GST_VIDEO_INFO_N_PLANES (info),
            offset, stride);
      }
      break;
    }
    case GST_V4L2_IO_USERPTR:
    default:
      g_assert_not_reached ();
      break;
  }

  pool->num_allocated++;

  *buffer = newbuf;

  return GST_FLOW_OK;

  /* ERRORS */
querybuf_failed:
  {
    gint errnosave = errno;

    GST_WARNING ("Failed QUERYBUF: %s", g_strerror (errnosave));
    gst_buffer_unref (newbuf);
    errno = errnosave;
    return GST_FLOW_ERROR;
  }
mmap_failed:
  {
    gint errnosave = errno;

    GST_WARNING ("Failed to mmap: %s", g_strerror (errnosave));
    gst_buffer_unref (newbuf);
    errno = errnosave;
    return GST_FLOW_ERROR;
  }
}
GstBuffer *
gst_core_video_buffer_new (CVBufferRef cvbuf, GstVideoInfo * vinfo)
{
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (CVPixelBufferLockBaseAddress (pixbuf,
          kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->cvbuf = CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  if (CVPixelBufferIsPlanar (pixbuf)) {
    int i, size, off;

    n_planes = CVPixelBufferGetPlaneCount (pixbuf);
    off = 0;
    for (i = 0; i < n_planes; ++i) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);
      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixbuf, i);
      offset[i] = off;
      off += size;

      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddressOfPlane (pixbuf, i), size, 0, size,
              NULL, NULL));
    }
  } else {
    int size;

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixbuf);
    offset[0] = 0;
    size = stride[0] * vinfo->height;

    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
            CVPixelBufferGetBaseAddress (pixbuf), size, 0, size, NULL, NULL));
  }

  if (vinfo) {
    GstVideoMeta *video_meta;

    video_meta =
        gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
        vinfo->finfo->format, vinfo->width, vinfo->height,
        n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}
static gboolean
gst_core_media_buffer_wrap_pixel_buffer (GstBuffer * buf, GstVideoInfo * info,
    CVPixelBufferRef pixel_buf, gboolean * has_padding, gboolean map)
{
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES] = { 0 };
  gint stride[GST_VIDEO_MAX_PLANES] = { 0 };
  GstVideoMeta *video_meta;
  UInt32 size;

  if (map && CVPixelBufferLockBaseAddress (pixel_buf, 0) != kCVReturnSuccess) {
    GST_ERROR ("Could not lock pixel buffer base address");
    return FALSE;
  }

  *has_padding = FALSE;

  if (CVPixelBufferIsPlanar (pixel_buf)) {
    gint i, size = 0, plane_offset = 0;

    n_planes = CVPixelBufferGetPlaneCount (pixel_buf);
    for (i = 0; i < n_planes; i++) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, i);

      if (stride[i] != GST_VIDEO_INFO_PLANE_STRIDE (info, i)) {
        *has_padding = TRUE;
      }

      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixel_buf, i);
      offset[i] = plane_offset;
      plane_offset += size;

      if (map) {
        gst_buffer_append_memory (buf,
            gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
                CVPixelBufferGetBaseAddressOfPlane (pixel_buf, i), size, 0,
                size, NULL, NULL));
      }
    }
  } else {

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixel_buf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixel_buf);

    if (map) {
      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddress (pixel_buf), size, 0, size, NULL,
              NULL));
    }
  }

  video_meta =
      gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_INFO_FORMAT (info), info->width, info->height, n_planes, offset,
      stride);

  return TRUE;
}
static GstXvImageMeta *
gst_buffer_add_xvimage_meta (GstBuffer * buffer, GstXvImageBufferPool * xvpool)
{
  GstXvImageSink *xvimagesink;
  int (*handler) (Display *, XErrorEvent *);
  gboolean success = FALSE;
  GstXContext *xcontext;
  GstXvImageMeta *meta;
  gint width, height, im_format, align = 15, offset;
  GstXvImageBufferPoolPrivate *priv;

  priv = xvpool->priv;
  xvimagesink = xvpool->sink;
  xcontext = xvimagesink->xcontext;

  width = priv->padded_width;
  height = priv->padded_height;
  im_format = priv->im_format;

  meta =
      (GstXvImageMeta *) gst_buffer_add_meta (buffer, GST_XVIMAGE_META_INFO,
      NULL);
#ifdef HAVE_XSHM
  meta->SHMInfo.shmaddr = ((void *) -1);
  meta->SHMInfo.shmid = -1;
#endif
  meta->x = priv->align.padding_left;
  meta->y = priv->align.padding_top;
  meta->width = priv->info.width;
  meta->height = priv->info.height;
  meta->sink = gst_object_ref (xvimagesink);
  meta->im_format = im_format;

  GST_DEBUG_OBJECT (xvimagesink, "creating image %p (%dx%d)", buffer,
      width, height);

  g_mutex_lock (&xvimagesink->x_lock);

  /* Setting an error handler to catch failure */
  error_caught = FALSE;
  handler = XSetErrorHandler (gst_xvimagesink_handle_xerror);

#ifdef HAVE_XSHM
  if (xcontext->use_xshm) {
    int expected_size;

    meta->xvimage = XvShmCreateImage (xcontext->disp,
        xcontext->xv_port_id, im_format, NULL, width, height, &meta->SHMInfo);
    if (!meta->xvimage || error_caught) {
      g_mutex_unlock (&xvimagesink->x_lock);

      /* Reset error flag */
      error_caught = FALSE;

      /* Push a warning */
      GST_ELEMENT_WARNING (xvimagesink, RESOURCE, WRITE,
          ("Failed to create output image buffer of %dx%d pixels",
              width, height),
          ("could not XShmCreateImage a %dx%d image", width, height));

      /* Retry without XShm */
      xvimagesink->xcontext->use_xshm = FALSE;

      /* Hold X mutex again to try without XShm */
      g_mutex_lock (&xvimagesink->x_lock);
      goto no_xshm;
    }

    /* we have to use the returned data_size for our shm size */
    meta->size = meta->xvimage->data_size;
    GST_LOG_OBJECT (xvimagesink, "XShm image size is %" G_GSIZE_FORMAT,
        meta->size);

    /* calculate the expected size.  This is only for sanity checking the
     * number we get from X. */
    switch (im_format) {
      case GST_MAKE_FOURCC ('I', '4', '2', '0'):
      case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
      {
        gint pitches[3];
        gint offsets[3];
        guint plane;

        offsets[0] = 0;
        pitches[0] = GST_ROUND_UP_4 (width);
        offsets[1] = offsets[0] + pitches[0] * GST_ROUND_UP_2 (height);
        pitches[1] = GST_ROUND_UP_8 (width) / 2;
        offsets[2] = offsets[1] + pitches[1] * GST_ROUND_UP_2 (height) / 2;
        pitches[2] = GST_ROUND_UP_8 (pitches[0]) / 2;

        expected_size = offsets[2] + pitches[2] * GST_ROUND_UP_2 (height) / 2;

        for (plane = 0; plane < meta->xvimage->num_planes; plane++) {
          GST_DEBUG_OBJECT (xvimagesink,
              "Plane %u has a expected pitch of %d bytes, " "offset of %d",
              plane, pitches[plane], offsets[plane]);
        }
        break;
      }
      case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
      case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
        expected_size = height * GST_ROUND_UP_4 (width * 2);
        break;
      default:
        expected_size = 0;
        break;
    }
    if (expected_size != 0 && meta->size != expected_size) {
      GST_WARNING_OBJECT (xvimagesink,
          "unexpected XShm image size (got %" G_GSIZE_FORMAT ", expected %d)",
          meta->size, expected_size);
    }

    /* Be verbose about our XvImage stride */
    {
      guint plane;

      for (plane = 0; plane < meta->xvimage->num_planes; plane++) {
        GST_DEBUG_OBJECT (xvimagesink, "Plane %u has a pitch of %d bytes, "
            "offset of %d", plane, meta->xvimage->pitches[plane],
            meta->xvimage->offsets[plane]);
      }
    }

    /* get shared memory */
    meta->SHMInfo.shmid =
        shmget (IPC_PRIVATE, meta->size + align, IPC_CREAT | 0777);
    if (meta->SHMInfo.shmid == -1)
      goto shmget_failed;

    /* attach */
    meta->SHMInfo.shmaddr = shmat (meta->SHMInfo.shmid, NULL, 0);
    if (meta->SHMInfo.shmaddr == ((void *) -1))
      goto shmat_failed;

    /* now we can set up the image data */
    meta->xvimage->data = meta->SHMInfo.shmaddr;
    meta->SHMInfo.readOnly = FALSE;

    if (XShmAttach (xcontext->disp, &meta->SHMInfo) == 0)
      goto xattach_failed;

    XSync (xcontext->disp, FALSE);

    /* Delete the shared memory segment as soon as everyone is attached.
     * This way, it will be deleted as soon as we detach later, and not
     * leaked if we crash. */
    shmctl (meta->SHMInfo.shmid, IPC_RMID, NULL);

    GST_DEBUG_OBJECT (xvimagesink, "XServer ShmAttached to 0x%x, id 0x%lx",
        meta->SHMInfo.shmid, meta->SHMInfo.shmseg);
  } else
  no_xshm:
#endif /* HAVE_XSHM */
  {
    meta->xvimage = XvCreateImage (xcontext->disp,
        xcontext->xv_port_id, im_format, NULL, width, height);
    if (!meta->xvimage || error_caught)
      goto create_failed;

    /* we have to use the returned data_size for our image size */
    meta->size = meta->xvimage->data_size;
    meta->xvimage->data = g_malloc (meta->size + align);

    XSync (xcontext->disp, FALSE);
  }

  if ((offset = ((guintptr) meta->xvimage->data & align)))
    offset = (align + 1) - offset;

  GST_DEBUG_OBJECT (xvimagesink, "memory %p, align %d, offset %d",
      meta->xvimage->data, align, offset);

  /* Reset error handler */
  error_caught = FALSE;
  XSetErrorHandler (handler);

  gst_buffer_append_memory (buffer,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, meta->xvimage->data,
          meta->size + align, offset, meta->size, NULL, NULL));

  g_mutex_unlock (&xvimagesink->x_lock);

  success = TRUE;

beach:
  if (!success)
    meta = NULL;

  return meta;

  /* ERRORS */
create_failed:
  {
    g_mutex_unlock (&xvimagesink->x_lock);
    /* Reset error handler */
    error_caught = FALSE;
    XSetErrorHandler (handler);
    /* Push an error */
    GST_ELEMENT_ERROR (xvimagesink, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            width, height),
        ("could not XvShmCreateImage a %dx%d image", width, height));
    goto beach;
  }
#ifdef HAVE_XSHM
shmget_failed:
  {
    g_mutex_unlock (&xvimagesink->x_lock);
    GST_ELEMENT_ERROR (xvimagesink, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            width, height),
        ("could not get shared memory of %" G_GSIZE_FORMAT " bytes",
            meta->size));
    goto beach;
  }
shmat_failed:
  {
    g_mutex_unlock (&xvimagesink->x_lock);
    GST_ELEMENT_ERROR (xvimagesink, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            width, height), ("Failed to shmat: %s", g_strerror (errno)));
    /* Clean up the shared memory segment */
    shmctl (meta->SHMInfo.shmid, IPC_RMID, NULL);
    goto beach;
  }
xattach_failed:
  {
    /* Clean up the shared memory segment */
    shmctl (meta->SHMInfo.shmid, IPC_RMID, NULL);
    g_mutex_unlock (&xvimagesink->x_lock);

    GST_ELEMENT_ERROR (xvimagesink, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            width, height), ("Failed to XShmAttach"));
    goto beach;
  }
#endif
}
static void
flush_data (GstRtpQDM2Depay * depay)
{
  guint i;
  guint avail;

  if ((avail = gst_adapter_available (depay->adapter)))
    gst_adapter_flush (depay->adapter, avail);

  GST_DEBUG ("Flushing %d packets", depay->nbpackets);

  for (i = 0; depay->packets[i]; i++) {
    QDM2Packet *pack = depay->packets[i];
    guint32 crc = 0;
    int i = 0;
    GstBuffer *buf;
    guint8 *data;

    /* CRC is the sum of everything (including first bytes) */

    data = pack->data;

    if (G_UNLIKELY (data == NULL))
      continue;

    /* If the packet size is bigger than 0xff, we need 2 bytes to store the size */
    if (depay->packetsize > 0xff) {
      /* Expanded size 0x02 | 0x80 */
      data[0] = 0x82;
      GST_WRITE_UINT16_BE (data + 1, depay->packetsize - 3);
    } else {
      data[0] = 0x2;
      data[1] = depay->packetsize - 2;
    }

    /* Calculate CRC */
    for (; i < depay->packetsize; i++)
      crc += data[i];

    GST_DEBUG ("CRC is 0x%x", crc);

    /* Write CRC */
    if (depay->packetsize > 0xff)
      GST_WRITE_UINT16_BE (data + 3, crc);
    else
      GST_WRITE_UINT16_BE (data + 2, crc);

    GST_MEMDUMP ("Extracted packet", data, depay->packetsize);

    buf = gst_buffer_new ();
    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (0, data, depay->packetsize, 0,
            depay->packetsize, data, g_free));

    gst_adapter_push (depay->adapter, buf);

    if (pack->data) {
      pack->data = NULL;
    }
  }
}
/*
 * Couple to GStreamer tightly by lock-stepping with the pipeline
 */
static int pipeline_push(struct videnc_state *st, const struct vidframe *frame)
{
	GstBuffer *buffer;
	uint8_t *data;
	size_t size;
	GstFlowReturn ret;
	int err = 0;

#if 1
	/* XXX: should not block the function here */

	/*
	 * Wait "start feed".
	 */
	pthread_mutex_lock(&st->streamer.wait.mutex);
	if (st->streamer.wait.flag == 1) {
		pthread_cond_wait(&st->streamer.wait.cond,
				  &st->streamer.wait.mutex);
	}
	if (st->streamer.eos.flag == -1)
		/* error */
		err = ENODEV;
	pthread_mutex_unlock(&st->streamer.wait.mutex);
	if (err)
		return err;
#endif

	/*
	 * Copy frame into buffer for gstreamer
	 */

	/* NOTE: I420 (YUV420P): hardcoded. */
	size = frame->linesize[0] * frame->size.h
		+ frame->linesize[1] * frame->size.h * 0.5
		+ frame->linesize[2] * frame->size.h * 0.5;

	/* allocate memory; it is freed by the g_free destroy notify passed
	   to gst_memory_new_wrapped() below */
	data = g_try_malloc(size);
	if (!data)
		return ENOMEM;

	/* copy content of frame */
	size = 0;
	memcpy(&data[size], frame->data[0],
	       frame->linesize[0] * frame->size.h);
	size += frame->linesize[0] * frame->size.h;
	memcpy(&data[size], frame->data[1],
	       frame->linesize[1] * frame->size.h * 0.5);
	size += frame->linesize[1] * frame->size.h * 0.5;
	memcpy(&data[size], frame->data[2],
	       frame->linesize[2] * frame->size.h * 0.5);
	size += frame->linesize[2] * frame->size.h * 0.5;

	/* Wrap memory in a gstreamer buffer */
	buffer = gst_buffer_new();
	gst_buffer_insert_memory(buffer, -1,
				 gst_memory_new_wrapped (0, data, size, 0,
							 size, data, g_free));

	/*
	 * Push data and EOS into gstreamer.
	 */

	ret = gst_app_src_push_buffer(st->streamer.source, buffer);
	if (ret != GST_FLOW_OK) {
		warning("gst_video: pushing buffer failed\n");
		err = EPROTO;
		goto out;
	}

#if 0
	ret = gst_app_src_end_of_stream(st->streamer.source);
	if (ret != GST_FLOW_OK) {
		warning("gst_video: pushing EOS failed\n");
		err = EPROTO;
		goto out;
	}
#endif


#if 0
	/*
	 * Wait "processing done".
	 */
	pthread_mutex_lock(&st->streamer.eos.mutex);
	if (st->streamer.eos.flag == 0)
		/* will return with EOS (1) or error (-1) */
		pthread_cond_wait(&st->streamer.wait.cond,
				  &st->streamer.wait.mutex);
	if (st->streamer.eos.flag == 1)
		/* reset eos */
		st->streamer.eos.flag = 0;
	else
		/* error */
		err = -1;
	pthread_mutex_unlock(&st->streamer.wait.mutex);
#endif


 out:
	return err;
}
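For context, a hypothetical counterpart to the lock-stepping above: appsrc "need-data" / "enough-data" handlers (attached via the signals of the same names or a GstAppSrcCallbacks struct) that toggle streamer.wait.flag. The callback names and the struct videnc_state layout are assumptions for illustration, not taken from the original module.

static void need_data_cb(GstAppSrc *src, guint length, gpointer user_data)
{
	struct videnc_state *st = user_data;
	(void)src;
	(void)length;

	pthread_mutex_lock(&st->streamer.wait.mutex);
	st->streamer.wait.flag = 0;   /* feeding allowed again */
	pthread_cond_signal(&st->streamer.wait.cond);
	pthread_mutex_unlock(&st->streamer.wait.mutex);
}

static void enough_data_cb(GstAppSrc *src, gpointer user_data)
{
	struct videnc_state *st = user_data;
	(void)src;

	pthread_mutex_lock(&st->streamer.wait.mutex);
	st->streamer.wait.flag = 1;   /* pipeline full, hold back */
	pthread_mutex_unlock(&st->streamer.wait.mutex);
}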
GstBuffer *
gst_core_video_buffer_new (GstCoreMediaCtx * ctx, CVBufferRef cvbuf,
    GstVideoInfo * vinfo)
{
  GstCVApi *cv = ctx->cv;
  void *data;
  size_t size;
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint width, height, n_planes, i;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != cv->CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (cv->CVPixelBufferLockBaseAddress (pixbuf,
          kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->ctx = g_object_ref (ctx);
  meta->cvbuf = cv->CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  data = cv->CVPixelBufferGetBaseAddress (pixbuf);
  height = cv->CVPixelBufferGetHeight (pixbuf);
  if (cv->CVPixelBufferIsPlanar (pixbuf)) {
    GstVideoInfo tmp_vinfo;

    n_planes = cv->CVPixelBufferGetPlaneCount (pixbuf);
    for (i = 0; i < n_planes; ++i)
      stride[i] = cv->CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);

    /* FIXME: don't hardcode NV12 */
    gst_video_info_init (&tmp_vinfo);
    gst_video_info_set_format (&tmp_vinfo,
        GST_VIDEO_FORMAT_NV12, stride[0], height);
    offset[1] = tmp_vinfo.offset[1];
    size = tmp_vinfo.size;
  } else {
    n_planes = 1;
    size = cv->CVPixelBufferGetBytesPerRow (pixbuf) * height;
  }

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  if (vinfo) {
    GstVideoMeta *video_meta;

    width = vinfo->width;
    video_meta =
        gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
        GST_VIDEO_FORMAT_NV12, width, height, n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}
static GstFlowReturn
gst_multi_file_src_create (GstPushSrc * src, GstBuffer ** buffer)
{
  GstMultiFileSrc *multifilesrc;
  gsize size;
  gchar *data;
  gchar *filename;
  GstBuffer *buf;
  gboolean ret;
  GError *error = NULL;

  multifilesrc = GST_MULTI_FILE_SRC (src);

  if (multifilesrc->index < multifilesrc->start_index) {
    multifilesrc->index = multifilesrc->start_index;
  }
  filename = gst_multi_file_src_get_filename (multifilesrc);

  GST_DEBUG_OBJECT (multifilesrc, "reading from file \"%s\".", filename);

  ret = g_file_get_contents (filename, &data, &size, &error);
  if (!ret) {
    if (multifilesrc->successful_read) {
      /* If we've read at least one buffer successfully, not finding the
       * next file is EOS. */
      g_free (filename);
      if (error != NULL)
        g_error_free (error);

      if (multifilesrc->loop) {
        error = NULL;
        multifilesrc->index = multifilesrc->start_index;

        filename = gst_multi_file_src_get_filename (multifilesrc);
        ret = g_file_get_contents (filename, &data, &size, &error);
        if (!ret) {
          g_free (filename);
          if (error != NULL)
            g_error_free (error);

          return GST_FLOW_EOS;
        }
      } else {
        return GST_FLOW_EOS;
      }
    } else {
      goto handle_error;
    }
  }

  multifilesrc->successful_read = TRUE;
  multifilesrc->index++;
  if (multifilesrc->stop_index != -1 &&
      multifilesrc->index >= multifilesrc->stop_index) {
    multifilesrc->index = multifilesrc->start_index;
  }

  buf = gst_buffer_new ();
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (0, data, size, 0, size, data, g_free));
  GST_BUFFER_OFFSET (buf) = multifilesrc->offset;
  GST_BUFFER_OFFSET_END (buf) = multifilesrc->offset + size;
  multifilesrc->offset += size;

  GST_DEBUG_OBJECT (multifilesrc, "read file \"%s\".", filename);

  g_free (filename);
  *buffer = buf;
  return GST_FLOW_OK;

handle_error:
  {
    if (error != NULL) {
      GST_ELEMENT_ERROR (multifilesrc, RESOURCE, READ,
          ("Error while reading from file \"%s\".", filename),
          ("%s", error->message));
      g_error_free (error);
    } else {
      GST_ELEMENT_ERROR (multifilesrc, RESOURCE, READ,
          ("Error while reading from file \"%s\".", filename),
          ("%s", g_strerror (errno)));
    }
    g_free (filename);
    return GST_FLOW_ERROR;
  }
}
/* Retrieve a frame and feed it into the pipeline
 */
static void
recorder_record_frame (ShellRecorder *recorder,
                       gboolean       paint)
{
  GstBuffer *buffer;
  ClutterCapture *captures;
  int n_captures;
  cairo_surface_t *image;
  guint size;
  uint8_t *data;
  GstMemory *memory;
  int i;
  GstClock *clock;
  GstClockTime now, base_time;

  g_return_if_fail (recorder->current_pipeline != NULL);

  /* If we get into the red zone, stop buffering new frames; 13/16 is
   * a bit more than the 3/4 threshold for a red indicator to keep the
   * indicator from flashing between red and yellow. */
  if (recorder->memory_used > (recorder->memory_target * 13) / 16)
    return;

  /* Drop frames to get down to something like the target frame rate; since frames
   * are generated with VBlank sync, we don't have full control anyways, so we just
   * drop frames if the interval since the last frame is less than 75% of the
   * desired inter-frame interval.
   */
  clock = gst_element_get_clock (recorder->current_pipeline->src);

  /* If we have no clock yet, the pipeline is not yet in PLAYING */
  if (!clock)
    return;

  base_time = gst_element_get_base_time (recorder->current_pipeline->src);
  now = gst_clock_get_time (clock) - base_time;
  gst_object_unref (clock);

  if (GST_CLOCK_TIME_IS_VALID (recorder->last_frame_time) &&
      now - recorder->last_frame_time < gst_util_uint64_scale_int (GST_SECOND, 3, 4 * recorder->framerate))
    return;
  recorder->last_frame_time = now;

  clutter_stage_capture (recorder->stage, paint, &recorder->area,
                         &captures, &n_captures);

  if (n_captures == 0)
    return;

  if (n_captures == 1)
    image = cairo_surface_reference (captures[0].image);
  else
    image = shell_util_composite_capture_images (captures,
                                                 n_captures,
                                                 recorder->area.x,
                                                 recorder->area.y,
                                                 recorder->area.width,
                                                 recorder->area.height);

  data = cairo_image_surface_get_data (image);
  size = (cairo_image_surface_get_height (image) *
          cairo_image_surface_get_stride (image));

  for (i = 0; i < n_captures; i++)
    cairo_surface_destroy (captures[i].image);
  g_free (captures);

  buffer = gst_buffer_new();
  memory = gst_memory_new_wrapped (0, data, size, 0, size,
                                   image,
                                   (GDestroyNotify) cairo_surface_destroy);
  gst_buffer_insert_memory (buffer, -1, memory);

  GST_BUFFER_PTS(buffer) = now;

  if (recorder->draw_cursor &&
      !g_settings_get_boolean (recorder->a11y_settings, MAGNIFIER_ACTIVE_KEY))
    recorder_draw_cursor (recorder, buffer);

  shell_recorder_src_add_buffer (SHELL_RECORDER_SRC (recorder->current_pipeline->src), buffer);
  gst_buffer_unref (buffer);

  /* Reset the timeout that we used to avoid an overlong pause in the stream */
  recorder_remove_redraw_timeout (recorder);
  recorder_add_redraw_timeout (recorder);
}
static GstFlowReturn
gst_niimaqsrc_create (GstPushSrc * psrc, GstBuffer ** buffer)
{
  GstNiImaqSrc *src = GST_NIIMAQSRC (psrc);
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime timestamp;
  GstClockTime duration;
  uInt32 copied_number;
  uInt32 copied_index;
  Int32 rval;
  uInt32 dropped;
  gboolean no_copy;
  GstMapInfo minfo;

  /* we can only do a no-copy if strides are properly byte aligned */
  no_copy = src->avoid_copy && src->width == src->rowpixels;

  /* start the IMAQ acquisition session if we haven't done so yet */
  if (!src->session_started) {
    if (!gst_niimaqsrc_start_acquisition (src)) {
      GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
          ("Unable to start acquisition."), (NULL));
      return GST_FLOW_ERROR;
    }
  }

  if (no_copy) {
    GST_LOG_OBJECT (src,
        "Sending IMAQ buffer #%d along without copying", src->cumbufnum);
    *buffer = gst_buffer_new ();
    if (G_UNLIKELY (*buffer == NULL))
      goto error;
  } else {
    GST_LOG_OBJECT (src, "Copying IMAQ buffer #%d, size %d",
        src->cumbufnum, src->framesize);
    ret =
        GST_BASE_SRC_CLASS (gst_niimaqsrc_parent_class)->alloc (GST_BASE_SRC
        (src), 0, src->framesize, buffer);
    if (ret != GST_FLOW_OK) {
      GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
          ("Failed to allocate buffer"),
          ("Failed to get downstream pad to allocate buffer"));
      goto error;
    }
  }

  //{
  // guint32 *data;
  // int i;
  //    rval = imgSessionExamineBuffer2 (src->sid, src->cumbufnum, &copied_number, &data);
  // for (i=0; i<src->bufsize;i++)
  //  if (data == src->buflist[i])
  //        break;
  // timestamp = src->times[i];
  // memcpy (GST_BUFFER_DATA (*buffer), data, src->framesize);
  // src->times[i] = GST_CLOCK_TIME_NONE;
  // imgSessionReleaseBuffer (src->sid); //TODO: mutex here?
  //}
  if (no_copy) {
    /* FIXME: with callback change, is this broken now? mutex... */
    gpointer data;
    rval =
        imgSessionExamineBuffer2 (src->sid, src->cumbufnum,
        &copied_number, &data);
    gst_buffer_append_memory (*buffer,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, data,
            src->framesize, 0, src->framesize, src,
            gst_niimaqsrc_release_buffer));
  } else if (src->width == src->rowpixels) {
    /* TODO: optionally use ExamineBuffer and byteswap in transfer (to offer BIG_ENDIAN) */
    gst_buffer_map (*buffer, &minfo, GST_MAP_WRITE);
    g_mutex_lock (&src->mutex);
    rval =
        imgSessionCopyBufferByNumber (src->sid, src->cumbufnum,
        minfo.data, IMG_OVERWRITE_GET_OLDEST, &copied_number, &copied_index);
    timestamp = src->times[copied_index];
    src->times[copied_index] = GST_CLOCK_TIME_NONE;
    g_mutex_unlock (&src->mutex);
    gst_buffer_unmap (*buffer, &minfo);
  } else {
    gst_buffer_map (*buffer, &minfo, GST_MAP_WRITE);
    g_mutex_lock (&src->mutex);
    rval =
        imgSessionCopyAreaByNumber (src->sid, src->cumbufnum, 0, 0,
        src->height, src->width, minfo.data, src->rowpixels,
        IMG_OVERWRITE_GET_OLDEST, &copied_number, &copied_index);
    timestamp = src->times[copied_index];
    src->times[copied_index] = GST_CLOCK_TIME_NONE;
    g_mutex_unlock (&src->mutex);
    gst_buffer_unmap (*buffer, &minfo);
  }

  /* TODO: do this above to reduce copying overhead */
  if (src->is_signed) {
    /* use distinct names so the element pointer 'src' is not shadowed */
    gint16 *src_data;
    guint16 *dst_data;
    guint i;

    gst_buffer_map (*buffer, &minfo, GST_MAP_READWRITE);
    src_data = (gint16 *) minfo.data;
    dst_data = (guint16 *) minfo.data;

    GST_DEBUG_OBJECT (src, "Shifting signed to unsigned");

    /* TODO: make this faster */
    for (i = 0; i < minfo.size / 2; i++)
      *dst_data++ = *src_data++ + 32768;

    gst_buffer_unmap (*buffer, &minfo);
  }

  if (rval) {
    gst_niimaqsrc_report_imaq_error (rval);
    GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
        ("failed to copy buffer %d", src->cumbufnum),
        ("failed to copy buffer %d", src->cumbufnum));
    goto error;
  }

  /* make guess of duration from timestamp and cumulative buffer number */
  if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
    duration = timestamp / (copied_number + 1);
  } else {
    duration = 33 * GST_MSECOND;
  }

  GST_BUFFER_OFFSET (*buffer) = copied_number;
  GST_BUFFER_OFFSET_END (*buffer) = copied_number + 1;
  GST_BUFFER_TIMESTAMP (*buffer) =
      timestamp - gst_element_get_base_time (GST_ELEMENT (src));
  GST_BUFFER_DURATION (*buffer) = duration;

  dropped = copied_number - src->cumbufnum;
  if (dropped > 0) {
    src->n_dropped_frames += dropped;
    GST_WARNING_OBJECT (src,
        "Asked to copy buffer #%d but was given #%d; just dropped %d frames (%d total)",
        src->cumbufnum, copied_number, dropped, src->n_dropped_frames);
  }

  /* set cumulative buffer number to get next frame */
  src->cumbufnum = copied_number + 1;
  src->n_frames++;

  if (G_UNLIKELY (src->start_time && !src->start_time_sent)) {
    GstTagList *tl =
        gst_tag_list_new (GST_TAG_DATE_TIME, src->start_time, NULL);
    GstEvent *e = gst_event_new_tag (tl);
    GST_DEBUG_OBJECT (src, "Sending start time event: %" GST_PTR_FORMAT, e);
    gst_pad_push_event (GST_BASE_SRC_PAD (src), e);
    src->start_time_sent = TRUE;
  }
  return ret;

error:
  {
    return ret;
  }
}
GstBuffer *
gst_core_media_buffer_new (GstCoreMediaCtx * ctx, CMSampleBufferRef sample_buf)
{
  GstCVApi *cv = ctx->cv;
  GstCMApi *cm = ctx->cm;
  CVImageBufferRef image_buf;
  CVPixelBufferRef pixel_buf;
  CMBlockBufferRef block_buf;
  Byte *data = NULL;
  UInt32 size;
  OSStatus status;
  GstBuffer *buf;
  GstCoreMediaMeta *meta;

  image_buf = cm->CMSampleBufferGetImageBuffer (sample_buf);
  pixel_buf = NULL;
  block_buf = cm->CMSampleBufferGetDataBuffer (sample_buf);

  if (image_buf != NULL &&
      CFGetTypeID (image_buf) == cv->CVPixelBufferGetTypeID ()) {
    pixel_buf = (CVPixelBufferRef) image_buf;

    if (cv->CVPixelBufferLockBaseAddress (pixel_buf,
            kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
      goto error;
    }

    if (cv->CVPixelBufferIsPlanar (pixel_buf)) {
      gint plane_count, plane_idx;

      data = cv->CVPixelBufferGetBaseAddressOfPlane (pixel_buf, 0);

      size = 0;
      plane_count = cv->CVPixelBufferGetPlaneCount (pixel_buf);
      for (plane_idx = 0; plane_idx != plane_count; plane_idx++) {
        size += cv->CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, plane_idx) *
            cv->CVPixelBufferGetHeightOfPlane (pixel_buf, plane_idx);
      }
    } else {
      data = cv->CVPixelBufferGetBaseAddress (pixel_buf);
      size = cv->CVPixelBufferGetBytesPerRow (pixel_buf) *
          cv->CVPixelBufferGetHeight (pixel_buf);
    }
  } else if (block_buf != NULL) {
    status = cm->CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
    if (status != noErr)
      goto error;
    size = cm->CMBlockBufferGetDataLength (block_buf);
  } else {
    goto error;
  }

  buf = gst_buffer_new ();

  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  meta->ctx = g_object_ref (ctx);
  meta->sample_buf = cm->FigSampleBufferRetain (sample_buf);
  meta->image_buf = image_buf;
  meta->pixel_buf = pixel_buf;
  meta->block_buf = block_buf;

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return buf;

error:
  return NULL;
}
static gboolean
parse_jpeg_segment (GstJpegSegment * segment)
{
  switch (segment->marker) {
    case GST_JPEG_MARKER_SOF0:
    case GST_JPEG_MARKER_SOF1:
    case GST_JPEG_MARKER_SOF2:
    case GST_JPEG_MARKER_SOF3:
    case GST_JPEG_MARKER_SOF9:
    case GST_JPEG_MARKER_SOF10:
    case GST_JPEG_MARKER_SOF11:{
      GstJpegFrameHdr hdr;
      int i;

      if (!gst_jpeg_segment_parse_frame_header (segment, &hdr)) {
        g_printerr ("Failed to parse frame header!\n");
        return FALSE;
      }

      g_print ("\t\twidth x height   = %u x %u\n", hdr.width, hdr.height);
      g_print ("\t\tsample precision = %u\n", hdr.sample_precision);
      g_print ("\t\tnum components   = %u\n", hdr.num_components);
      for (i = 0; i < hdr.num_components; ++i) {
        g_print ("\t\t%d: id=%d, h=%d, v=%d, qts=%d\n", i,
            hdr.components[i].identifier, hdr.components[i].horizontal_factor,
            hdr.components[i].vertical_factor,
            hdr.components[i].quant_table_selector);
      }
      break;
    }
    case GST_JPEG_MARKER_DHT:{
      GstJpegHuffmanTables ht;

      if (!gst_jpeg_segment_parse_huffman_table (segment, &ht)) {
        g_printerr ("Failed to parse huffman table!\n");
        return FALSE;
      }
      break;
    }
    case GST_JPEG_MARKER_DQT:{
      GstJpegQuantTables qt;

      if (!gst_jpeg_segment_parse_quantization_table (segment, &qt)) {
        g_printerr ("Failed to parse quantization table!\n");
        return FALSE;
      }
      break;
    }
    case GST_JPEG_MARKER_SOS:{
      GstJpegScanHdr hdr;
      int i;

      if (!gst_jpeg_segment_parse_scan_header (segment, &hdr)) {
        g_printerr ("Failed to parse scan header!\n");
        return FALSE;
      }

      g_print ("\t\tnum components   = %u\n", hdr.num_components);
      for (i = 0; i < hdr.num_components; ++i) {
        g_print ("\t\t  %d: cs=%d, dcs=%d, acs=%d\n", i,
            hdr.components[i].component_selector,
            hdr.components[i].dc_selector, hdr.components[i].ac_selector);
      }
      break;
    }
    case GST_JPEG_MARKER_COM:
      /* gst_util_dump_mem (segment->data + segment->offset, segment->size); */
      break;
    default:
      if (segment->marker >= GST_JPEG_MARKER_APP_MIN
          && segment->marker <= GST_JPEG_MARKER_APP_MAX) {
        guint n = segment->marker - GST_JPEG_MARKER_APP_MIN;

        if (app_segments[n] == NULL)
          app_segments[n] = gst_buffer_new ();

        gst_buffer_append_memory (app_segments[n],
            gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
                (guint8 *) segment->data + segment->offset,
                segment->size, 0, segment->size, NULL, NULL));
      }
      break;
  }
  return TRUE;
}
static GstFlowReturn
gst_openjpeg_enc_handle_frame (GstVideoEncoder * encoder,
    GstVideoCodecFrame * frame)
{
  GstOpenJPEGEnc *self = GST_OPENJPEG_ENC (encoder);
  GstFlowReturn ret = GST_FLOW_OK;
#ifdef HAVE_OPENJPEG_1
  opj_cinfo_t *enc;
  GstMapInfo map;
  guint length;
  opj_cio_t *io;
#else
  opj_codec_t *enc;
  opj_stream_t *stream;
  MemStream mstream;
#endif
  opj_image_t *image;
  GstVideoFrame vframe;

  GST_DEBUG_OBJECT (self, "Handling frame");

  enc = opj_create_compress (self->codec_format);
  if (!enc)
    goto initialization_error;

#ifdef HAVE_OPENJPEG_1
  if (G_UNLIKELY (gst_debug_category_get_threshold (GST_CAT_DEFAULT) >=
          GST_LEVEL_TRACE)) {
    opj_event_mgr_t callbacks;

    callbacks.error_handler = gst_openjpeg_enc_opj_error;
    callbacks.warning_handler = gst_openjpeg_enc_opj_warning;
    callbacks.info_handler = gst_openjpeg_enc_opj_info;
    opj_set_event_mgr ((opj_common_ptr) enc, &callbacks, self);
  } else {
    opj_set_event_mgr ((opj_common_ptr) enc, NULL, NULL);
  }
#else
  if (G_UNLIKELY (gst_debug_category_get_threshold (GST_CAT_DEFAULT) >=
          GST_LEVEL_TRACE)) {
    opj_set_info_handler (enc, gst_openjpeg_enc_opj_info, self);
    opj_set_warning_handler (enc, gst_openjpeg_enc_opj_warning, self);
    opj_set_error_handler (enc, gst_openjpeg_enc_opj_error, self);
  } else {
    opj_set_info_handler (enc, NULL, NULL);
    opj_set_warning_handler (enc, NULL, NULL);
    opj_set_error_handler (enc, NULL, NULL);
  }
#endif

  if (!gst_video_frame_map (&vframe, &self->input_state->info,
          frame->input_buffer, GST_MAP_READ))
    goto map_read_error;

  image = gst_openjpeg_enc_fill_image (self, &vframe);
  if (!image)
    goto fill_image_error;
  gst_video_frame_unmap (&vframe);

  opj_setup_encoder (enc, &self->params, image);

#ifdef HAVE_OPENJPEG_1
  io = opj_cio_open ((opj_common_ptr) enc, NULL, 0);
  if (!io)
    goto open_error;

  if (!opj_encode (enc, io, image, NULL))
    goto encode_error;

  opj_image_destroy (image);

  length = cio_tell (io);

  ret =
      gst_video_encoder_allocate_output_frame (encoder, frame,
      length + (self->is_jp2c ? 8 : 0));
  if (ret != GST_FLOW_OK)
    goto allocate_error;

  gst_buffer_fill (frame->output_buffer, self->is_jp2c ? 8 : 0, io->buffer,
      length);
  if (self->is_jp2c) {
    gst_buffer_map (frame->output_buffer, &map, GST_MAP_WRITE);
    GST_WRITE_UINT32_BE (map.data, length + 8);
    GST_WRITE_UINT32_BE (map.data + 4, GST_MAKE_FOURCC ('j', 'p', '2', 'c'));
    gst_buffer_unmap (frame->output_buffer, &map);
  }

  opj_cio_close (io);
  opj_destroy_compress (enc);
#else
  stream = opj_stream_create (4096, OPJ_FALSE);
  if (!stream)
    goto open_error;

  mstream.allocsize = 4096;
  mstream.data = g_malloc (mstream.allocsize);
  mstream.offset = 0;
  mstream.size = 0;

  opj_stream_set_read_function (stream, read_fn);
  opj_stream_set_write_function (stream, write_fn);
  opj_stream_set_skip_function (stream, skip_fn);
  opj_stream_set_seek_function (stream, seek_fn);
  opj_stream_set_user_data (stream, &mstream);
  opj_stream_set_user_data_length (stream, mstream.size);

  if (!opj_start_compress (enc, image, stream))
    goto encode_error;

  if (!opj_encode (enc, stream))
    goto encode_error;

  if (!opj_end_compress (enc, stream))
    goto encode_error;

  opj_image_destroy (image);
  opj_stream_destroy (stream);
  opj_destroy_codec (enc);

  frame->output_buffer = gst_buffer_new ();

  if (self->is_jp2c) {
    GstMapInfo map;
    GstMemory *mem;

    mem = gst_allocator_alloc (NULL, 8, NULL);
    gst_memory_map (mem, &map, GST_MAP_WRITE);
    GST_WRITE_UINT32_BE (map.data, mstream.size + 8);
    GST_WRITE_UINT32_BE (map.data + 4, GST_MAKE_FOURCC ('j', 'p', '2', 'c'));
    gst_memory_unmap (mem, &map);
    gst_buffer_append_memory (frame->output_buffer, mem);
  }

  gst_buffer_append_memory (frame->output_buffer,
      gst_memory_new_wrapped (0, mstream.data, mstream.allocsize, 0,
          mstream.size, NULL, (GDestroyNotify) g_free));
#endif

  ret = gst_video_encoder_finish_frame (encoder, frame);

  return ret;

initialization_error:
  {
    gst_video_codec_frame_unref (frame);
    GST_ELEMENT_ERROR (self, LIBRARY, INIT,
        ("Failed to initialize OpenJPEG encoder"), (NULL));
    return GST_FLOW_ERROR;
  }
map_read_error:
  {
#ifdef HAVE_OPENJPEG_1
    opj_destroy_compress (enc);
#else
    opj_destroy_codec (enc);
#endif
    gst_video_codec_frame_unref (frame);

    GST_ELEMENT_ERROR (self, CORE, FAILED,
        ("Failed to map input buffer"), (NULL));
    return GST_FLOW_ERROR;
  }
fill_image_error:
  {
#ifdef HAVE_OPENJPEG_1
    opj_destroy_compress (enc);
#else
    opj_destroy_codec (enc);
#endif
    gst_video_frame_unmap (&vframe);
    gst_video_codec_frame_unref (frame);

    GST_ELEMENT_ERROR (self, LIBRARY, INIT,
        ("Failed to fill OpenJPEG image"), (NULL));
    return GST_FLOW_ERROR;
  }
open_error:
  {
    opj_image_destroy (image);
#ifdef HAVE_OPENJPEG_1
    opj_destroy_compress (enc);
#else
    opj_destroy_codec (enc);
#endif
    gst_video_codec_frame_unref (frame);

    GST_ELEMENT_ERROR (self, LIBRARY, INIT,
        ("Failed to open OpenJPEG data"), (NULL));
    return GST_FLOW_ERROR;
  }
encode_error:
  {
#ifdef HAVE_OPENJPEG_1
    opj_cio_close (io);
    opj_image_destroy (image);
    opj_destroy_compress (enc);
#else
    opj_stream_destroy (stream);
    g_free (mstream.data);
    opj_image_destroy (image);
    opj_destroy_codec (enc);
#endif
    gst_video_codec_frame_unref (frame);

    GST_ELEMENT_ERROR (self, STREAM, ENCODE,
        ("Failed to encode OpenJPEG stream"), (NULL));
    return GST_FLOW_ERROR;
  }
#ifdef HAVE_OPENJPEG_1
allocate_error:
  {
    opj_cio_close (io);
    opj_destroy_compress (enc);
    gst_video_codec_frame_unref (frame);

    GST_ELEMENT_ERROR (self, CORE, FAILED,
        ("Failed to allocate output buffer"), (NULL));
    return ret;
  }
#endif
}
static void
src_need_data_cb (GstElement * src, guint size, gpointer data)
{
  GstBuffer *buf;
  GstFlowReturn ret;

  buf = gst_buffer_new ();
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
          (gpointer) dummytext, sizeof (dummytext), 0,
          sizeof (dummytext), NULL, NULL));

  GST_BUFFER_OFFSET (buf) = 0;

  g_signal_emit_by_name (src, "push-buffer", buf, &ret);
  gst_buffer_unref (buf);

  fail_unless (ret == GST_FLOW_OK);
}
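All of the snippets above boil down to two ownership modes of gst_memory_new_wrapped(): either the memory is merely borrowed (NULL user_data and notify, typically with GST_MEMORY_FLAG_READONLY or GST_MEMORY_FLAG_NO_SHARE) or the wrapped GstMemory takes ownership and frees it through the GDestroyNotify. A condensed sketch of both variants follows; the wrap_owned and wrap_borrowed names are illustrative, not from any of the modules above.

static GstBuffer *
wrap_owned (gsize size)
{
  GstBuffer *buf = gst_buffer_new ();
  guint8 *data = g_malloc (size);

  /* the buffer owns 'data'; g_free runs when the memory is finalized */
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (0, data, size, 0, size, data, g_free));
  return buf;
}

static GstBuffer *
wrap_borrowed (const guint8 * data, gsize size)
{
  GstBuffer *buf = gst_buffer_new ();

  /* the caller keeps ownership; 'data' must outlive the buffer */
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
          (guint8 *) data, size, 0, size, NULL, NULL));
  return buf;
}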