/* Default start implementation: preallocate the pool's configured minimum
 * number of buffers so the pool is primed before the first acquire. */
static gboolean
default_start (GstBufferPool * pool)
{
  GstBufferPoolPrivate *priv = pool->priv;
  GstBufferPoolClass *pclass = GST_BUFFER_POOL_GET_CLASS (pool);
  guint n;

  /* allocate min_buffers buffers up front */
  for (n = 0; n < priv->min_buffers; n++) {
    GstBuffer *buffer;

    if (do_alloc_buffer (pool, &buffer, NULL) != GST_FLOW_OK) {
      GST_WARNING_OBJECT (pool, "failed to allocate buffer");
      return FALSE;
    }

    /* hand the buffer straight to the release vmethod so it lands in the
     * pool's queue; no extra refcount handling is needed at this point. */
    if (G_LIKELY (pclass->release_buffer))
      pclass->release_buffer (pool, buffer);
  }
  return TRUE;
}
/**
 * gst_buffer_pool_release_buffer:
 * @pool: a #GstBufferPool
 * @buffer: (transfer full): a #GstBuffer
 *
 * Release @buffer to @pool. @buffer should have previously been allocated from
 * @pool with gst_buffer_pool_acquire_buffer().
 *
 * This function is usually called automatically when the last ref on @buffer
 * disappears.
 */
void
gst_buffer_pool_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
  GstBufferPoolClass *pclass;

  g_return_if_fail (GST_IS_BUFFER_POOL (pool));
  g_return_if_fail (buffer != NULL);

  /* Buffers owned by us carry a pointer back to their pool; atomically swap
   * it to NULL so each buffer can be returned exactly once. A buffer that is
   * not ours, or was already returned, fails the exchange and is ignored. */
  if (!g_atomic_pointer_compare_and_exchange (&buffer->pool, pool, NULL))
    return;

  pclass = GST_BUFFER_POOL_GET_CLASS (pool);

  /* let the subclass restore the buffer to a clean state when needed */
  if (G_LIKELY (pclass->reset_buffer))
    pclass->reset_buffer (pool, buffer);

  /* hand the buffer back to the subclass for reuse */
  if (G_LIKELY (pclass->release_buffer))
    pclass->release_buffer (pool, buffer);

  dec_outstanding (pool);

  /* drop the ref the outstanding buffer held on us */
  gst_object_unref (pool);
}
/* Stop the V4L2 pool: disconnect the allocator signal, stop device
 * streaming, flush the allocator, reclaim every buffer still marked as
 * queued, then run the base-class stop and finally stop the allocator.
 * Returns FALSE if the device refuses STREAMOFF or the allocator could
 * not be fully stopped. */
static gboolean
gst_v4l2_buffer_pool_stop (GstBufferPool * bpool)
{
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
  gboolean ret;
  gint i;

  GST_DEBUG_OBJECT (pool, "stopping pool");

  /* stop listening for group-released notifications from the allocator */
  if (pool->group_released_handler > 0) {
    g_signal_handler_disconnect (pool->vallocator,
        pool->group_released_handler);
    pool->group_released_handler = 0;
  }

  /* drop our ref on the downstream/import pool, if any */
  if (pool->other_pool) {
    gst_object_unref (pool->other_pool);
    pool->other_pool = NULL;
  }

  /* streaming must be stopped before buffers can be reclaimed */
  if (!gst_v4l2_buffer_pool_streamoff (pool))
    goto streamoff_failed;

  if (pool->vallocator)
    gst_v4l2_allocator_flush (pool->vallocator);

  /* reclaim every buffer we still track as queued on the device */
  for (i = 0; i < VIDEO_MAX_FRAME; i++) {
    if (pool->buffers[i]) {
      GstBuffer *buffer = pool->buffers[i];

      pool->buffers[i] = NULL;

      if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
        /* output side: drop the extra ref held on the queued buffer */
        gst_buffer_unref (buffer);
      else
        /* capture side: return it to the base-class free list via the
         * parent vmethod (do NOT re-queue it on the device) */
        pclass->release_buffer (bpool, buffer);

      g_atomic_int_add (&pool->num_queued, -1);
    }
  }

  ret = GST_BUFFER_POOL_CLASS (parent_class)->stop (bpool);

  /* only stop the allocator once the base class released its buffers */
  if (ret && pool->vallocator) {
    GstV4l2Return vret;

    vret = gst_v4l2_allocator_stop (pool->vallocator);

    if (vret == GST_V4L2_BUSY)
      GST_WARNING_OBJECT (pool, "some buffers are still outstanding");

    ret = (vret == GST_V4L2_OK);
  }

  return ret;

  /* ERRORS */
streamoff_failed:
  GST_ERROR_OBJECT (pool, "device refused to stop streaming");
  return FALSE;
}
/* release_buffer vmethod: decide, per buffer type (capture vs output) and
 * io-mode, whether a released buffer is queued back on the device for
 * refilling or handed back to the base-class free list via the parent
 * vmethod. */
static void
gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
{
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
  GstV4l2Object *obj = pool->obj;

  GST_DEBUG_OBJECT (pool, "release buffer %p", buffer);

  switch (obj->type) {
    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
      /* capture, put the buffer back in the queue so that we can refill it
       * later. */
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* release back in the pool */
          pclass->release_buffer (bpool, buffer);
          break;

        case GST_V4L2_IO_DMABUF:
        case GST_V4L2_IO_MMAP:
        case GST_V4L2_IO_USERPTR:
        case GST_V4L2_IO_DMABUF_IMPORT:
        {
          if (gst_v4l2_is_buffer_valid (buffer, NULL)) {
            /* queue back in the device */
            if (pool->other_pool)
              gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
            /* if queuing fails, fall back to the base-class free list */
            if (gst_v4l2_buffer_pool_qbuf (pool, buffer) != GST_FLOW_OK)
              pclass->release_buffer (bpool, buffer);
          } else {
            /* Simply release invalid/modified buffer, the allocator will
             * give it back later */
            GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
            pclass->release_buffer (bpool, buffer);
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      break;

    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* release back in the pool */
          pclass->release_buffer (bpool, buffer);
          break;

        case GST_V4L2_IO_MMAP:
        case GST_V4L2_IO_DMABUF:
        case GST_V4L2_IO_USERPTR:
        case GST_V4L2_IO_DMABUF_IMPORT:
        {
          GstV4l2MemoryGroup *group;
          guint index;

          if (!gst_v4l2_is_buffer_valid (buffer, &group)) {
            /* Simply release invalid/modified buffer, the allocator will
             * give it back later */
            GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
            pclass->release_buffer (bpool, buffer);
            break;
          }

          index = group->buffer.index;

          if (pool->buffers[index] == NULL) {
            GST_LOG_OBJECT (pool, "buffer %u not queued, putting on free list",
                index);

            /* Remove qdata, this will unmap any map data in userptr */
            gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
                GST_V4L2_IMPORT_QUARK, NULL, NULL);

            /* reset to default size */
            gst_v4l2_allocator_reset_group (pool->vallocator, group);

            /* playback, put the buffer back in the queue to refill later. */
            pclass->release_buffer (bpool, buffer);
          } else {
            /* We keep a ref on queued buffer, so this should never happen */
            g_assert_not_reached ();
          }
          break;
        }

        default:
          g_assert_not_reached ();
          break;
      }
      break;

    default:
      g_assert_not_reached ();
      break;
  }
}