Example #1
void mapGstBuffer(GstBuffer* buffer)
{
    GstMapInfo* mapInfo = g_slice_new(GstMapInfo);
    if (!gst_buffer_map(buffer, mapInfo, GST_MAP_WRITE)) {
        g_slice_free(GstMapInfo, mapInfo);
        gst_buffer_unref(buffer);
        return;
    }

    // Attach the map info to the buffer as qdata so the unmapping code can
    // retrieve it later.
    GstMiniObject* miniObject = reinterpret_cast<GstMiniObject*>(buffer);
    gst_mini_object_set_qdata(miniObject, g_quark_from_static_string(webkitGstMapInfoQuarkString), mapInfo, 0);
}
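
This excerpt only shows the mapping side. As a minimal sketch of the matching unmap path (the helper name is an assumption, not part of the quoted WebKit source; the quark string is the one used above and <gst/gst.h> is assumed to be included), the stored GstMapInfo would be stolen back, the buffer unmapped, and the slice freed:

/* Sketch only, not part of the quoted source. */
static void
unmap_gst_buffer (GstBuffer * buffer)
{
  GstMapInfo *map_info = (GstMapInfo *)
      gst_mini_object_steal_qdata (GST_MINI_OBJECT_CAST (buffer),
      g_quark_from_static_string (webkitGstMapInfoQuarkString));

  if (!map_info)
    return;

  gst_buffer_unmap (buffer, map_info);
  g_slice_free (GstMapInfo, map_info);
}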
Example #2
static GstFlowReturn
gst_v4l2_buffer_pool_import_dmabuf (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstV4l2MemoryGroup *group = NULL;
  GstMemory *dma_mem[GST_VIDEO_MAX_PLANES] = { 0 };
  guint n_mem = gst_buffer_n_memory (src);
  gint i;

  GST_LOG_OBJECT (pool, "importing dmabuf");

  if (!gst_v4l2_is_buffer_valid (dest, &group))
    goto not_our_buffer;

  if (n_mem > GST_VIDEO_MAX_PLANES)
    goto too_many_mems;

  for (i = 0; i < n_mem; i++)
    dma_mem[i] = gst_buffer_peek_memory (src, i);

  if (!gst_v4l2_allocator_import_dmabuf (pool->vallocator, group, n_mem,
          dma_mem))
    goto import_failed;

  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
      gst_buffer_ref (src), (GDestroyNotify) gst_buffer_unref);

  return GST_FLOW_OK;

not_our_buffer:
  {
    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
    return GST_FLOW_ERROR;
  }
too_many_mems:
  {
    GST_ERROR_OBJECT (pool, "could not map buffer");
    return GST_FLOW_ERROR;
  }
import_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to import dmabuf");
    return GST_FLOW_ERROR;
  }
}
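
A note on ownership in the example above: the qdata attached to dest holds the reference taken by gst_buffer_ref (src), and the gst_buffer_unref destroy notify releases it when the qdata is replaced or dest is destroyed (Example #5 below clears it explicitly). A minimal sketch of reading the imported buffer back (the helper name is an assumption, not part of the quoted source):

/* Sketch only: returns a borrowed reference to the dmabuf source buffer
 * attached by gst_v4l2_buffer_pool_import_dmabuf(), or NULL. */
static GstBuffer *
get_imported_dmabuf (GstBuffer * dest)
{
  return (GstBuffer *) gst_mini_object_get_qdata (GST_MINI_OBJECT (dest),
      GST_V4L2_IMPORT_QUARK);
}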
Example #3
static void
gst_omx_buffer_pool_free_buffer (GstBufferPool * bpool, GstBuffer * buffer)
{
  GstOMXBufferPool *pool = GST_OMX_BUFFER_POOL (bpool);

  /* If the buffers belong to another pool, restore them now */
  GST_OBJECT_LOCK (pool);
  if (pool->other_pool) {
    gst_object_replace ((GstObject **) & buffer->pool,
        (GstObject *) pool->other_pool);
  }
  GST_OBJECT_UNLOCK (pool);

  gst_mini_object_set_qdata (GST_MINI_OBJECT_CAST (buffer),
      gst_omx_buffer_data_quark, NULL, NULL);

  GST_BUFFER_POOL_CLASS (gst_omx_buffer_pool_parent_class)->free_buffer (bpool,
      buffer);
}
Example #4
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
    GstAllocator * dmabuf_allocator)
{
  GstV4l2MemoryGroup *group;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);

  group = gst_v4l2_allocator_alloc (allocator);

  if (group == NULL)
    return NULL;

  for (i = 0; i < group->n_mem; i++) {
    GstV4l2Memory *mem;
    GstMemory *dma_mem;
    gint dmafd;

    if (group->mem[i] == NULL) {
      struct v4l2_exportbuffer expbuf = { 0 };

      expbuf.type = allocator->type;
      expbuf.index = group->buffer.index;
      expbuf.plane = i;
      expbuf.flags = O_CLOEXEC | O_RDWR;

      if (v4l2_ioctl (allocator->video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
        goto expbuf_failed;

      GST_LOG_OBJECT (allocator, "exported DMABUF as fd %i plane %d",
          expbuf.fd, i);

      group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
          NULL, group->planes[i].length, 0, 0, group->planes[i].length, i,
          NULL, expbuf.fd, group);
    } else {
      /* Take back the allocator reference */
      gst_object_ref (allocator);
    }

    g_assert (gst_is_v4l2_memory (group->mem[i]));
    mem = (GstV4l2Memory *) group->mem[i];

    if ((dmafd = dup (mem->dmafd)) < 0)
      goto dup_failed;

    dma_mem = gst_dmabuf_allocator_alloc (dmabuf_allocator, dmafd,
        mem->mem.maxsize);

    gst_mini_object_set_qdata (GST_MINI_OBJECT (dma_mem),
        GST_V4L2_MEMORY_QUARK, mem, (GDestroyNotify) gst_memory_unref);

    group->mem[i] = dma_mem;
    group->mems_allocated++;
  }

  gst_v4l2_allocator_reset_size (allocator, group);

  return group;

expbuf_failed:
  {
    GST_ERROR_OBJECT (allocator, "Failed to export DMABUF: %s",
        g_strerror (errno));
    goto cleanup;
  }
dup_failed:
  {
    GST_ERROR_OBJECT (allocator, "Failed to dup DMABUF descriptor: %s",
        g_strerror (errno));
    goto cleanup;
  }
cleanup:
  {
    _cleanup_failed_alloc (allocator, group);
    return NULL;
  }
}
Example #5
static void
gst_v4l2_buffer_pool_flush_stop (GstBufferPool * bpool)
{
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
  GstV4l2Object *obj = pool->obj;
  gint i;

  GST_DEBUG_OBJECT (pool, "stop flushing");

  /* If we haven't started streaming yet, simply call streamon */
  if (!pool->streaming)
    goto streamon;

  if (pool->other_pool)
    gst_buffer_pool_set_flushing (pool->other_pool, FALSE);

  if (!gst_v4l2_buffer_pool_streamoff (pool))
    goto stop_failed;

  gst_v4l2_allocator_flush (pool->vallocator);

  /* Reset our state */
  switch (obj->mode) {
    case GST_V4L2_IO_RW:
      break;
    case GST_V4L2_IO_MMAP:
    case GST_V4L2_IO_USERPTR:
    case GST_V4L2_IO_DMABUF:
    case GST_V4L2_IO_DMABUF_IMPORT:
    {
      gsize num_allocated;

      num_allocated = gst_v4l2_allocator_num_allocated (pool->vallocator);

      for (i = 0; i < num_allocated; i++) {
        /* Re-enqueue buffers */
        if (pool->buffers[i]) {
          GstBufferPool *bpool = (GstBufferPool *) pool;
          GstBuffer *buffer = pool->buffers[i];

          pool->buffers[i] = NULL;

          /* Remove qdata, this will unmap any map data in
           * userptr/dmabuf-import */
          gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
              GST_V4L2_IMPORT_QUARK, NULL, NULL);

          if (V4L2_TYPE_IS_OUTPUT (obj->type))
            gst_buffer_unref (buffer);
          else
            gst_v4l2_buffer_pool_release_buffer (bpool, buffer);

          g_atomic_int_add (&pool->num_queued, -1);
        }
      }

      break;
    }
    default:
      g_assert_not_reached ();
      break;
  }

streamon:
  /* Start streaming on capture device only */
  if (!V4L2_TYPE_IS_OUTPUT (obj->type))
    gst_v4l2_buffer_pool_streamon (pool);

  gst_poll_set_flushing (pool->poll, FALSE);

  return;

  /* ERRORS */
stop_failed:
  {
    GST_ERROR_OBJECT (pool, "device refused to flush");
  }
}
Example #6
static GstFlowReturn
gst_v4l2_buffer_pool_import_userptr (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstV4l2MemoryGroup *group = NULL;
  GstMapFlags flags;
  const GstVideoFormatInfo *finfo = pool->caps_info.finfo;
  struct UserPtrData *data = NULL;

  GST_LOG_OBJECT (pool, "importing userptr");

  /* get the group */
  if (!gst_v4l2_is_buffer_valid (dest, &group))
    goto not_our_buffer;

  if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
    flags = GST_MAP_READ;
  else
    flags = GST_MAP_WRITE;

  data = g_slice_new0 (struct UserPtrData);

  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
    data->is_frame = TRUE;

    if (!gst_video_frame_map (&data->frame, &pool->caps_info, src, flags))
      goto invalid_buffer;

    if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
            data->frame.info.size, finfo->n_planes, data->frame.data,
            data->frame.info.offset))
      goto import_failed;
  } else {
    gsize offset[1] = { 0 };
    gpointer ptr[1];

    data->is_frame = FALSE;

    if (!gst_buffer_map (src, &data->map, flags))
      goto invalid_buffer;

    ptr[0] = data->map.data;

    if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
            data->map.size, 1, ptr, offset))
      goto import_failed;
  }

  data->buffer = gst_buffer_ref (src);

  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
      data, (GDestroyNotify) _unmap_userptr_frame);

  return ret;

not_our_buffer:
  {
    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
    return GST_FLOW_ERROR;
  }
invalid_buffer:
  {
    GST_ERROR_OBJECT (pool, "could not map buffer");
    g_slice_free (struct UserPtrData, data);
    return GST_FLOW_ERROR;
  }
import_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to import data");
    _unmap_userptr_frame (data);
    return GST_FLOW_ERROR;
  }
}
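
The struct UserPtrData type and the _unmap_userptr_frame() destroy notify used above are not part of this excerpt. A plausible reconstruction, inferred from the fields accessed in the example (an assumption, not the verbatim upstream definition; <gst/video/video.h> is assumed for GstVideoFrame):

/* Sketch of the helper type, inferred from the fields used in Example #6. */
struct UserPtrData
{
  GstBuffer *buffer;
  gboolean is_frame;
  GstVideoFrame frame;
  GstMapInfo map;
};

/* Sketch of the GDestroyNotify: undo the mapping and drop the source
 * buffer reference. */
static void
_unmap_userptr_frame (struct UserPtrData *data)
{
  /* The NULL check on buffer covers the import_failed path, where the
   * source buffer reference has not been taken yet. */
  if (data->is_frame)
    gst_video_frame_unmap (&data->frame);
  else if (data->buffer)
    gst_buffer_unmap (data->buffer, &data->map);

  if (data->buffer)
    gst_buffer_unref (data->buffer);

  g_slice_free (struct UserPtrData, data);
}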
Example #7
/**
 * gst_v4l2_buffer_pool_process:
 * @bpool: a #GstBufferPool
 * @buf: a #GstBuffer, may be replaced
 *
 * Process @buf in @bpool. For capture devices, this function fills @buf with
 * data from the device. For output devices, this function sends the contents
 * of @buf to the device for playback.
 *
 * Returns: %GST_FLOW_OK on success.
 */
GstFlowReturn
gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBufferPool *bpool = GST_BUFFER_POOL_CAST (pool);
  GstV4l2Object *obj = pool->obj;

  GST_DEBUG_OBJECT (pool, "process buffer %p", buf);

  g_return_val_if_fail (gst_buffer_pool_is_active (bpool), GST_FLOW_ERROR);

  if (GST_BUFFER_POOL_IS_FLUSHING (pool))
    return GST_FLOW_FLUSHING;

  switch (obj->type) {
    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
      /* capture */
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* capture into the buffer */
          ret = gst_v4l2_do_read (pool, *buf);
          break;

        case GST_V4L2_IO_MMAP:
        case GST_V4L2_IO_DMABUF:
        {
          GstBuffer *tmp;

          if ((*buf)->pool == bpool) {
            if (gst_buffer_get_size (*buf) == 0)
              goto eos;

            /* start copying buffers when we are running low on buffers */
            if (g_atomic_int_get (&pool->num_queued) < pool->copy_threshold) {
              GstBuffer *copy;

              if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {

                if (gst_buffer_pool_acquire_buffer (bpool, &copy,
                        NULL) == GST_FLOW_OK) {
                  gst_v4l2_buffer_pool_release_buffer (bpool, copy);
                  goto done;
                }
              }

              /* copy the buffer */
              copy = gst_buffer_copy_region (*buf,
                  GST_BUFFER_COPY_ALL | GST_BUFFER_COPY_DEEP, 0, -1);
              GST_LOG_OBJECT (pool, "copy buffer %p->%p", *buf, copy);

              /* and requeue so that we can continue capturing */
              gst_buffer_unref (*buf);
              *buf = copy;
            }

            /* nothing, data was inside the buffer when we did _acquire() */
            goto done;
          }

          /* buffer not from our pool, grab a frame and copy it into the target */
          if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp)) != GST_FLOW_OK)
            goto done;

          /* An empty buffer on capture indicates the end of stream */
          if (gst_buffer_get_size (tmp) == 0) {
            gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
            goto eos;
          }

          ret = gst_v4l2_buffer_pool_copy_buffer (pool, *buf, tmp);

          /* and queue the buffer again after the copy */
          gst_v4l2_buffer_pool_release_buffer (bpool, tmp);

          if (ret != GST_FLOW_OK)
            goto copy_failed;
          break;
        }

        case GST_V4L2_IO_USERPTR:
        {
          struct UserPtrData *data;

          /* Replace our buffer with downstream allocated buffer */
          data = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
              GST_V4L2_IMPORT_QUARK);
          gst_buffer_replace (buf, data->buffer);
          _unmap_userptr_frame (data);
          break;
        }

        case GST_V4L2_IO_DMABUF_IMPORT:
        {
          GstBuffer *tmp;

          /* Replace our buffer with downstream allocated buffer */
          tmp = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
              GST_V4L2_IMPORT_QUARK);
          gst_buffer_replace (buf, tmp);
          gst_buffer_unref (tmp);
          break;
        }

        default:
          g_assert_not_reached ();
          break;
      }
      break;

    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
      /* playback */
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* FIXME, do write() */
          GST_WARNING_OBJECT (pool, "implement write()");
          break;

        case GST_V4L2_IO_USERPTR:
        case GST_V4L2_IO_DMABUF_IMPORT:
        case GST_V4L2_IO_DMABUF:
        case GST_V4L2_IO_MMAP:
        {
          GstBuffer *to_queue = NULL;
          GstV4l2MemoryGroup *group;
          gint index;

          if ((*buf)->pool != bpool)
            goto copying;

          if (!gst_v4l2_is_buffer_valid (*buf, &group))
            goto copying;

          index = group->buffer.index;

          GST_LOG_OBJECT (pool, "processing buffer %i from our pool", index);

          if (pool->buffers[index] != NULL) {
            GST_LOG_OBJECT (pool, "buffer %i already queued, copying", index);
            goto copying;
          }

          /* we can queue directly */
          to_queue = gst_buffer_ref (*buf);

        copying:
          if (to_queue == NULL) {
            GstBufferPoolAcquireParams params = { 0 };

            GST_LOG_OBJECT (pool, "alloc buffer from our pool");

            /* this can return EOS if all buffers are outstanding which would
             * be strange because we would expect the upstream element to have
             * allocated them and returned them to us. */
            params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;
            ret = gst_buffer_pool_acquire_buffer (bpool, &to_queue, &params);
            if (ret != GST_FLOW_OK)
              goto acquire_failed;

            ret = gst_v4l2_buffer_pool_prepare_buffer (pool, to_queue, *buf);
            if (ret != GST_FLOW_OK) {
              gst_buffer_unref (to_queue);
              goto prepare_failed;
            }
          }

          if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue)) != GST_FLOW_OK)
            goto queue_failed;

          /* if we are not streaming yet (this is the first buffer), start
           * streaming now */
          if (!gst_v4l2_buffer_pool_streamon (pool)) {
            /* don't check return value because qbuf would have failed */
            gst_v4l2_is_buffer_valid (to_queue, &group);

            /* qbuf has taken the ref of the to_queue buffer but we are not in
             * streaming state, so the flush logic won't be performed.
             * To avoid leaks, flush the allocator and restore the queued
             * buffer as non-queued */
            gst_v4l2_allocator_flush (pool->vallocator);

            pool->buffers[group->buffer.index] = NULL;

            gst_mini_object_set_qdata (GST_MINI_OBJECT (to_queue),
                GST_V4L2_IMPORT_QUARK, NULL, NULL);
            gst_buffer_unref (to_queue);
            g_atomic_int_add (&pool->num_queued, -1);
            goto start_failed;
          }

          if (g_atomic_int_get (&pool->num_queued) >= pool->min_latency) {
            GstBuffer *out;
            /* all buffers are queued, try to dequeue one and release it back
             * into the pool so that _acquire can get to it again. */
            ret = gst_v4l2_buffer_pool_dqbuf (pool, &out);
            if (ret == GST_FLOW_OK)
              /* release the rendered buffer back into the pool. This wakes up any
               * thread waiting for a buffer in _acquire(). */
              gst_buffer_unref (out);
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      break;
    default:
      g_assert_not_reached ();
      break;
  }
done:
  return ret;

  /* ERRORS */
copy_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to copy buffer");
    return ret;
  }
eos:
  {
    GST_DEBUG_OBJECT (pool, "end of stream reached");
    return GST_FLOW_EOS;
  }
acquire_failed:
  {
    if (ret == GST_FLOW_FLUSHING)
      GST_DEBUG_OBJECT (pool, "flushing");
    else
      GST_WARNING_OBJECT (pool, "failed to acquire a buffer: %s",
          gst_flow_get_name (ret));
    return ret;
  }
prepare_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to prepare data");
    return ret;
  }
queue_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to queue buffer");
    return ret;
  }
start_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to start streaming");
    return GST_FLOW_ERROR;
  }
}
Example #8
static void
gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
{
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
  GstV4l2Object *obj = pool->obj;

  GST_DEBUG_OBJECT (pool, "release buffer %p", buffer);

  switch (obj->type) {
    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
      /* capture, put the buffer back in the queue so that we can refill it
       * later. */
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* release back in the pool */
          pclass->release_buffer (bpool, buffer);
          break;

        case GST_V4L2_IO_DMABUF:
        case GST_V4L2_IO_MMAP:
        case GST_V4L2_IO_USERPTR:
        case GST_V4L2_IO_DMABUF_IMPORT:
        {
          if (gst_v4l2_is_buffer_valid (buffer, NULL)) {
            /* queue back in the device */
            if (pool->other_pool)
              gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
            if (gst_v4l2_buffer_pool_qbuf (pool, buffer) != GST_FLOW_OK)
              pclass->release_buffer (bpool, buffer);
          } else {
            /* Simply release the invalid/modified buffer, the allocator will
             * give it back later */
            GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
            pclass->release_buffer (bpool, buffer);
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      break;

    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* release back in the pool */
          pclass->release_buffer (bpool, buffer);
          break;

        case GST_V4L2_IO_MMAP:
        case GST_V4L2_IO_DMABUF:
        case GST_V4L2_IO_USERPTR:
        case GST_V4L2_IO_DMABUF_IMPORT:
        {
          GstV4l2MemoryGroup *group;
          guint index;

          if (!gst_v4l2_is_buffer_valid (buffer, &group)) {
            /* Simply release the invalid/modified buffer, the allocator will
             * give it back later */
            GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
            pclass->release_buffer (bpool, buffer);
            break;
          }

          index = group->buffer.index;

          if (pool->buffers[index] == NULL) {
            GST_LOG_OBJECT (pool, "buffer %u not queued, putting on free list",
                index);

            /* Remove qdata, this will unmap any map data in userptr */
            gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
                GST_V4L2_IMPORT_QUARK, NULL, NULL);

            /* reset to default size */
            gst_v4l2_allocator_reset_group (pool->vallocator, group);

            /* playback, put the buffer back in the queue to refill later. */
            pclass->release_buffer (bpool, buffer);
          } else {
            /* We keep a ref on queued buffer, so this should never happen */
            g_assert_not_reached ();
          }
          break;
        }

        default:
          g_assert_not_reached ();
          break;
      }
      break;

    default:
      g_assert_not_reached ();
      break;
  }
}
Example #9
GstMemory *
gst_vaapi_dmabuf_memory_new (GstAllocator * allocator, GstVaapiVideoMeta * meta)
{
  GstMemory *mem;
  GstVaapiDisplay *display;
  GstVaapiSurface *surface;
  GstVaapiSurfaceProxy *proxy;
  GstVaapiBufferProxy *dmabuf_proxy;
  gint dmabuf_fd;
  const GstVideoInfo *vip;
  guint flags;

  g_return_val_if_fail (allocator != NULL, NULL);
  g_return_val_if_fail (meta != NULL, NULL);

  vip = gst_allocator_get_vaapi_video_info (allocator, &flags);
  if (!vip)
    return NULL;

  display = gst_vaapi_video_meta_get_display (meta);
  if (!display)
    return NULL;

  surface = gst_vaapi_surface_new_full (display, vip, flags);
  if (!surface)
    goto error_create_surface;

  proxy = gst_vaapi_surface_proxy_new (surface);
  if (!proxy)
    goto error_create_surface_proxy;

  dmabuf_proxy = gst_vaapi_surface_get_dma_buf_handle (surface);
  gst_vaapi_object_unref (surface);
  if (!dmabuf_proxy)
    goto error_create_dmabuf_proxy;

  gst_vaapi_video_meta_set_surface_proxy (meta, proxy);
  gst_vaapi_surface_proxy_unref (proxy);

  dmabuf_fd = gst_vaapi_buffer_proxy_get_handle (dmabuf_proxy);
  if (dmabuf_fd < 0 || (dmabuf_fd = dup (dmabuf_fd)) < 0)
    goto error_create_dmabuf_handle;

  mem = gst_dmabuf_allocator_alloc (allocator, dmabuf_fd,
      gst_vaapi_buffer_proxy_get_size (dmabuf_proxy));
  if (!mem)
    goto error_create_dmabuf_memory;

  gst_mini_object_set_qdata (GST_MINI_OBJECT_CAST (mem),
      GST_VAAPI_BUFFER_PROXY_QUARK, dmabuf_proxy,
      (GDestroyNotify) gst_vaapi_buffer_proxy_unref);
  return mem;

  /* ERRORS */
error_create_surface:
  {
    GST_ERROR ("failed to create VA surface (format:%s size:%ux%u)",
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (vip)),
        GST_VIDEO_INFO_WIDTH (vip), GST_VIDEO_INFO_HEIGHT (vip));
    return NULL;
  }
error_create_surface_proxy:
  {
    GST_ERROR ("failed to create VA surface proxy");
    gst_vaapi_object_unref (surface);
    return NULL;
  }
error_create_dmabuf_proxy:
  {
    GST_ERROR ("failed to export VA surface to DMABUF");
    gst_vaapi_surface_proxy_unref (proxy);
    return NULL;
  }
error_create_dmabuf_handle:
  {
    GST_ERROR ("failed to duplicate DMABUF handle");
    gst_vaapi_buffer_proxy_unref (dmabuf_proxy);
    return NULL;
  }
error_create_dmabuf_memory:
  {
    GST_ERROR ("failed to create DMABUF memory");
    gst_vaapi_buffer_proxy_unref (dmabuf_proxy);
    return NULL;
  }
}
Example #10
static GstFlowReturn
gst_omx_buffer_pool_alloc_buffer (GstBufferPool * bpool,
    GstBuffer ** buffer, GstBufferPoolAcquireParams * params)
{
  GstOMXBufferPool *pool = GST_OMX_BUFFER_POOL (bpool);
  GstBuffer *buf;
  GstOMXBuffer *omx_buf;

  g_return_val_if_fail (pool->allocating, GST_FLOW_ERROR);

  omx_buf = g_ptr_array_index (pool->port->buffers, pool->current_buffer_index);
  g_return_val_if_fail (omx_buf != NULL, GST_FLOW_ERROR);

  if (pool->other_pool) {
    guint i, n;

    buf = g_ptr_array_index (pool->buffers, pool->current_buffer_index);
    g_assert (pool->other_pool == buf->pool);
    gst_object_replace ((GstObject **) & buf->pool, NULL);

    n = gst_buffer_n_memory (buf);
    for (i = 0; i < n; i++) {
      GstMemory *mem = gst_buffer_peek_memory (buf, i);

      /* FIXME: We don't allow sharing because we need to know
       * when the memory becomes unused and can only then put
       * it back to the pool. Which is done in the pool's release
       * function
       */
      GST_MINI_OBJECT_FLAG_SET (mem, GST_MEMORY_FLAG_NO_SHARE);
    }

    if (pool->add_videometa) {
      GstVideoMeta *meta;

      meta = gst_buffer_get_video_meta (buf);
      if (!meta) {
        gst_buffer_add_video_meta (buf, GST_VIDEO_FRAME_FLAG_NONE,
            GST_VIDEO_INFO_FORMAT (&pool->video_info),
            GST_VIDEO_INFO_WIDTH (&pool->video_info),
            GST_VIDEO_INFO_HEIGHT (&pool->video_info));
      }
    }

    pool->need_copy = FALSE;
  } else {
    GstMemory *mem;
    const guint nstride = pool->port->port_def.format.video.nStride;
    const guint nslice = pool->port->port_def.format.video.nSliceHeight;
    gsize offset[GST_VIDEO_MAX_PLANES] = { 0, };
    gint stride[GST_VIDEO_MAX_PLANES] = { nstride, 0, };

    mem = gst_omx_memory_allocator_alloc (pool->allocator, 0, omx_buf);
    buf = gst_buffer_new ();
    gst_buffer_append_memory (buf, mem);
    g_ptr_array_add (pool->buffers, buf);

    switch (GST_VIDEO_INFO_FORMAT (&pool->video_info)) {
      case GST_VIDEO_FORMAT_ABGR:
      case GST_VIDEO_FORMAT_ARGB:
      case GST_VIDEO_FORMAT_RGB16:
      case GST_VIDEO_FORMAT_BGR16:
      case GST_VIDEO_FORMAT_YUY2:
      case GST_VIDEO_FORMAT_UYVY:
      case GST_VIDEO_FORMAT_YVYU:
      case GST_VIDEO_FORMAT_GRAY8:
        break;
      case GST_VIDEO_FORMAT_I420:
        stride[1] = nstride / 2;
        offset[1] = offset[0] + stride[0] * nslice;
        stride[2] = nstride / 2;
        offset[2] = offset[1] + (stride[1] * nslice / 2);
        break;
      case GST_VIDEO_FORMAT_NV12:
      case GST_VIDEO_FORMAT_NV16:
        stride[1] = nstride;
        offset[1] = offset[0] + stride[0] * nslice;
        break;
      default:
        g_assert_not_reached ();
        break;
    }

    if (pool->add_videometa) {
      pool->need_copy = FALSE;
    } else {
      GstVideoInfo info;
      gboolean need_copy = FALSE;
      gint i;

      gst_video_info_init (&info);
      gst_video_info_set_format (&info,
          GST_VIDEO_INFO_FORMAT (&pool->video_info),
          GST_VIDEO_INFO_WIDTH (&pool->video_info),
          GST_VIDEO_INFO_HEIGHT (&pool->video_info));

      for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&pool->video_info); i++) {
        if (info.stride[i] != stride[i] || info.offset[i] != offset[i]) {
          need_copy = TRUE;
          break;
        }
      }

      pool->need_copy = need_copy;
    }

    if (pool->need_copy || pool->add_videometa) {
      /* We always add the videometa. It's the job of the user
       * to copy the buffer if pool->need_copy is TRUE
       */
      gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
          GST_VIDEO_INFO_FORMAT (&pool->video_info),
          GST_VIDEO_INFO_WIDTH (&pool->video_info),
          GST_VIDEO_INFO_HEIGHT (&pool->video_info),
          GST_VIDEO_INFO_N_PLANES (&pool->video_info), offset, stride);
    }
  }

  gst_mini_object_set_qdata (GST_MINI_OBJECT_CAST (buf),
      gst_omx_buffer_data_quark, omx_buf, NULL);

  *buffer = buf;

  pool->current_buffer_index++;

  return GST_FLOW_OK;
}
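
Taken together, the examples above all follow the same small pattern: register a quark, attach per-buffer (or per-memory) state with a destroy notify, read it back where needed, and clear it by setting NULL. A self-contained sketch of that pattern (the quark string and payload are illustrative only, not taken from any of the projects quoted above):

#include <gst/gst.h>

static void
qdata_pattern (GstBuffer * buffer)
{
  GQuark quark = g_quark_from_static_string ("example-qdata");
  gchar *payload = g_strdup ("some per-buffer state");

  /* Attach: the buffer takes ownership of payload; g_free runs when the
   * qdata is replaced or the buffer is destroyed. */
  gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer), quark, payload, g_free);

  /* Read back a borrowed pointer. */
  GST_LOG ("qdata: %s",
      (gchar *) gst_mini_object_get_qdata (GST_MINI_OBJECT (buffer), quark));

  /* Clear: setting NULL invokes the destroy notify on the stored payload,
   * as Examples #3, #5 and #8 do. */
  gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer), quark, NULL, NULL);
}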