/* Allocates the blitter's internal "fill frame" buffer and pre-configures the
 * IPU fill task that will use it as input.
 *
 * NOTE(review): fill_frame_width, fill_frame_height and format are not
 * parameters or locals here — presumably file-scope constants/variables
 * defined elsewhere in this file; confirm their definitions.
 *
 * Returns TRUE on success, FALSE if the frame buffer could not be allocated.
 */
static gboolean gst_imx_ipu_blitter_allocate_internal_fill_frame(GstImxIpuBlitter *ipu_blitter)
{
	GstImxPhysMemory *phys_mem;

	/* Not using the dma bufferpool for this, since that bufferpool will
	 * be configured to input frame sizes. Plus, the pool wouldn't yield any benefits here. */
	ipu_blitter->fill_frame = gst_buffer_new_allocate(
		ipu_blitter->allocator,
		fill_frame_width * fill_frame_height * gst_imx_ipu_video_bpp(format),
		NULL
	);

	if (ipu_blitter->fill_frame == NULL)
	{
		GST_ERROR_OBJECT(ipu_blitter, "could not allocate internal fill frame");
		return FALSE;
	}

	/* The allocator is a physical-memory allocator, so the first (and only)
	 * memory block carries the physical address the IPU needs. */
	phys_mem = (GstImxPhysMemory *)gst_buffer_peek_memory(ipu_blitter->fill_frame, 0);

	/* Zero the task first so any field not set below is in a defined state. */
	memset(&(ipu_blitter->priv->fill_task), 0, sizeof(struct ipu_task));
	ipu_blitter->priv->fill_task.input.crop.pos.x = 0;
	ipu_blitter->priv->fill_task.input.crop.pos.y = 0;
	ipu_blitter->priv->fill_task.input.crop.w = fill_frame_width;
	ipu_blitter->priv->fill_task.input.crop.h = fill_frame_height;
	ipu_blitter->priv->fill_task.input.width = fill_frame_width;
	ipu_blitter->priv->fill_task.input.height = fill_frame_height;
	ipu_blitter->priv->fill_task.input.paddr = (dma_addr_t)(phys_mem->phys_addr);
	ipu_blitter->priv->fill_task.input.format = gst_imx_ipu_blitter_get_v4l_format(format);

	return TRUE;
}
/* Acquires a picture_t from the VLC video output and attaches it to the
 * planes (GstVlcPicturePlane memories) of p_buffer, so that GStreamer writes
 * land directly in the vout picture.
 *
 * Returns true on success, false when no picture could be acquired.
 */
bool gst_vlc_picture_plane_allocator_hold( GstVlcPicturePlaneAllocator *p_allocator,
        GstBuffer *p_buffer )
{
    picture_t* p_pic = NULL;
    decoder_t* p_dec = p_allocator->p_dec;
    GstVlcPicturePlane *p_mem;
    int i_plane;

    /* NOTE(review): the negation implies decoder_UpdateVideoFormat() returns
     * 0 on success; only then is a new picture requested — confirm against
     * the VLC decoder API. */
    if( !decoder_UpdateVideoFormat( p_dec ) )
        p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
    {
        msg_Err( p_allocator->p_dec, "failed to acquire picture from vout" );
        return false;
    }

    /* Wire each GstMemory plane of the buffer to the matching picture plane.
     * All planes share the same picture_t pointer. */
    for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
    {
        p_mem = (GstVlcPicturePlane*) gst_buffer_peek_memory ( p_buffer,
                i_plane );
        p_mem->p_pic = p_pic;
        p_mem->p_plane = &p_pic->p[ i_plane ];
    }

    return true;
}
/* GstVideoMeta unmap hook for VA-API backed memory.
 *
 * Decrements the memory's map count and, on the last unmap, unmaps the
 * underlying VA image. The @plane argument is unused: mapping state is kept
 * per-memory, not per-plane.
 *
 * Returns TRUE on success, FALSE when the memory is not in a valid state.
 */
gboolean
gst_video_meta_unmap_vaapi_memory (GstVideoMeta * meta, guint plane,
    GstMapInfo * info)
{
  GstVaapiVideoMemory *const mem =
      GST_VAAPI_VIDEO_MEMORY_CAST (gst_buffer_peek_memory (meta->buffer, 0));

  g_return_val_if_fail (mem, FALSE);
  g_return_val_if_fail (GST_VAAPI_IS_VIDEO_ALLOCATOR (mem->parent_instance.
          allocator), FALSE);
  g_return_val_if_fail (mem->meta, FALSE);
  g_return_val_if_fail (mem->surface, FALSE);
  g_return_val_if_fail (mem->image, FALSE);

  /* Only tear down the mapping once the last overlapping map is released. */
  if (--mem->map_count == 0) {
    mem->map_type = 0;

    /* Unmap VA image used for read/writes */
    if (info->flags & GST_MAP_READWRITE) {
      gst_vaapi_image_unmap (mem->image);

      /* A write mapping means the CPU-side image now holds the freshest
       * data; mark it current so later syncs use it. */
      if (info->flags & GST_MAP_WRITE) {
        GST_VAAPI_VIDEO_MEMORY_FLAG_SET (mem,
            GST_VAAPI_VIDEO_MEMORY_FLAG_IMAGE_IS_CURRENT);
      }
    }
  }
  return TRUE;
}
/* Takes ownership of a new video buffer for rendering: on the very first
 * frame it sets up the GL scene and textures, afterwards it releases the
 * previous frame. The texture is then either taken straight from GL memory
 * (zero-copy path) or filled via the buffer's upload meta. */
static void
update_image (APP_STATE_T * state, GstBuffer * buffer)
{
  if (state->current_buffer == NULL) {
    /* Setup the model world */
    init_model_proj (state);
    TRACE_VC_MEMORY ("after init_model_proj");

    /* initialize the OGLES texture(s) */
    init_textures (state, buffer);
    TRACE_VC_MEMORY ("after init_textures");
  } else {
    gst_buffer_unref (state->current_buffer);
  }

  state->current_buffer = gst_buffer_ref (buffer);

  TRACE_VC_MEMORY_ONCE_FOR_ID ("before GstVideoGLTextureUploadMeta", gid0);

  if (state->can_avoid_upload) {
    /* Zero-copy: the buffer's memory already is a GL texture. */
    GstMemory *gl_mem = gst_buffer_peek_memory (state->current_buffer, 0);
    g_assert (gst_is_gl_memory (gl_mem));
    state->tex = ((GstGLMemory *) gl_mem)->tex_id;
  } else {
    GstVideoGLTextureUploadMeta *upload_meta =
        gst_buffer_get_video_gl_texture_upload_meta (buffer);

    /* Upload into our own texture; only single-texture formats supported. */
    if (upload_meta != NULL && upload_meta->n_textures == 1) {
      guint texture_ids[4] = { state->tex, 0, 0, 0 };

      if (!gst_video_gl_texture_upload_meta_upload (upload_meta, texture_ids)) {
        GST_WARNING ("failed to upload to texture");
      }
    }
  }

  TRACE_VC_MEMORY_ONCE_FOR_ID ("after GstVideoGLTextureUploadMeta", gid1);
}
/* Acquire a buffer from the OMX buffer pool.
 *
 * For output ports the pool does not pick a free buffer itself: it hands out
 * exactly the buffer the OMX component just produced, identified by
 * pool->current_buffer_index (set elsewhere before acquisition). For input
 * ports it defers to the parent class's normal acquire logic.
 */
static GstFlowReturn
gst_omx_buffer_pool_acquire_buffer (GstBufferPool * bpool,
    GstBuffer ** buffer, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn ret;
  GstOMXBufferPool *pool = GST_OMX_BUFFER_POOL (bpool);

  if (pool->port->port_def.eDir == OMX_DirOutput) {
    GstBuffer *buf;

    /* current_buffer_index must have been set by the caller/decoder loop. */
    g_return_val_if_fail (pool->current_buffer_index != -1, GST_FLOW_ERROR);

    buf = g_ptr_array_index (pool->buffers, pool->current_buffer_index);
    g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);
    *buffer = buf;
    ret = GST_FLOW_OK;

    /* If it's our own memory we have to set the sizes */
    if (!pool->other_pool) {
      GstMemory *mem = gst_buffer_peek_memory (*buffer, 0);

      g_assert (mem
          && g_strcmp0 (mem->allocator->mem_type, GST_OMX_MEMORY_TYPE) == 0);
      /* Mirror the filled region reported by the OMX buffer header onto the
       * GstMemory, so downstream sees the correct payload. */
      mem->size = ((GstOMXMemory *) mem)->buf->omx_buf->nFilledLen;
      mem->offset = ((GstOMXMemory *) mem)->buf->omx_buf->nOffset;
    }
  } else {
    /* Acquire any buffer that is available to be filled by upstream */
    ret =
        GST_BUFFER_POOL_CLASS (gst_omx_buffer_pool_parent_class)->acquire_buffer
        (bpool, buffer, params);
  }

  return ret;
}
/* Acquire a buffer from the EGLImage buffer pool, skipping the buffer whose
 * memory is currently bound as the on-screen EGLImage.
 *
 * If the first acquisition returns the currently-displayed memory, a second
 * acquisition is made and the first buffer is detached from the pool and
 * released, keeping the in-use EGLImage out of circulation until the next
 * upload replaces it.
 */
static GstFlowReturn
gst_egl_image_buffer_pool_acquire_buffer (GstBufferPool * bpool,
    GstBuffer ** buffer, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn ret;
  GstEGLImageBufferPool *pool;

  ret =
      GST_BUFFER_POOL_CLASS
      (gst_egl_image_buffer_pool_parent_class)->acquire_buffer (bpool,
      buffer, params);
  if (ret != GST_FLOW_OK || !*buffer)
    return ret;

  pool = GST_EGL_IMAGE_BUFFER_POOL (bpool);

  /* XXX: Don't return the memory we just rendered, glEGLImageTargetTexture2DOES()
   * keeps the EGLImage unmappable until the next one is uploaded */
  if (*buffer && gst_buffer_peek_memory (*buffer, 0) == pool->state->current_mem) {
    GstBuffer *oldbuf = *buffer;

    ret =
        GST_BUFFER_POOL_CLASS
        (gst_egl_image_buffer_pool_parent_class)->acquire_buffer (bpool,
        buffer, params);
    /* Re-attach the skipped buffer to the pool before unreffing it, so the
     * unref returns it to the pool instead of destroying it. */
    gst_object_replace ((GstObject **) & oldbuf->pool, (GstObject *) pool);
    gst_buffer_unref (oldbuf);
  }

  return ret;
}
/* Creates a 64x64 RGBx buffer filled with zeros (black) for use as a dummy
 * input frame, attaches video metadata, and records its physical address in
 * a GstImxPhysMemMeta so the IPU can address it directly.
 *
 * NOTE(review): the result of gst_buffer_new_allocate() is not checked; a
 * failed allocation would crash in gst_buffer_memset() — confirm whether the
 * allocator can fail here.
 */
static void gst_imx_ipu_blitter_init_dummy_black_buffer(GstImxIpuBlitter *ipu_blitter)
{
	GstVideoInfo video_info;

	gst_video_info_init(&video_info);
	gst_video_info_set_format(&video_info, GST_VIDEO_FORMAT_RGBx, 64, 64);

	ipu_blitter->dummy_black_buffer = gst_buffer_new_allocate(ipu_blitter->allocator, GST_VIDEO_INFO_SIZE(&video_info), NULL);
	/* Zero-filled RGBx is black. */
	gst_buffer_memset(ipu_blitter->dummy_black_buffer, 0, 0, GST_VIDEO_INFO_SIZE(&video_info));

	gst_buffer_add_video_meta_full(
		ipu_blitter->dummy_black_buffer,
		GST_VIDEO_FRAME_FLAG_NONE,
		GST_VIDEO_INFO_FORMAT(&video_info),
		GST_VIDEO_INFO_WIDTH(&video_info),
		GST_VIDEO_INFO_HEIGHT(&video_info),
		GST_VIDEO_INFO_N_PLANES(&video_info),
		&(GST_VIDEO_INFO_PLANE_OFFSET(&video_info, 0)),
		&(GST_VIDEO_INFO_PLANE_STRIDE(&video_info, 0))
	);

	{
		/* Copy the physical address out of the allocator's memory block into
		 * a phys-mem meta attached to the buffer. */
		GstImxPhysMemory *imx_phys_mem_mem = (GstImxPhysMemory *)gst_buffer_peek_memory(ipu_blitter->dummy_black_buffer, 0);
		GstImxPhysMemMeta *phys_mem_meta = (GstImxPhysMemMeta *)GST_IMX_PHYS_MEM_META_ADD(ipu_blitter->dummy_black_buffer);

		phys_mem_meta->phys_addr = imx_phys_mem_mem->phys_addr;
	}
}
gboolean gst_vulkan_swapper_render_buffer (GstVulkanSwapper * swapper, GstBuffer * buffer, GError ** error) { GstMemory *mem; gboolean ret; mem = gst_buffer_peek_memory (buffer, 0); if (!mem) { g_set_error_literal (error, GST_VULKAN_ERROR, VK_ERROR_FORMAT_NOT_SUPPORTED, "Buffer has no memory"); return FALSE; } if (!gst_is_vulkan_buffer_memory (mem)) { g_set_error_literal (error, GST_VULKAN_ERROR, VK_ERROR_FORMAT_NOT_SUPPORTED, "Incorrect memory type"); return FALSE; } RENDER_LOCK (swapper); ret = _render_buffer_unlocked (swapper, buffer, error); RENDER_UNLOCK (swapper); return ret; }
/* Binds the dma-buf backing @inbuf to the VA-API meta of @outbuf.
 *
 * A VA surface wrapping the dma-buf fd is created on first use and cached on
 * @inbuf, so repeated conversions of the same upstream buffer reuse the same
 * surface. @outbuf also gets a parent-buffer meta keeping @inbuf alive.
 *
 * Returns TRUE on success, FALSE on any failure (logged).
 */
static gboolean
plugin_bind_dma_to_vaapi_buffer (GstVaapiPluginBase * plugin,
    GstBuffer * inbuf, GstBuffer * outbuf)
{
  GstVideoInfo *const vip = &plugin->sinkpad_info;
  GstVaapiVideoMeta *meta;
  GstVaapiSurface *surface;
  GstVaapiSurfaceProxy *proxy;
  gint fd;

  fd = gst_dmabuf_memory_get_fd (gst_buffer_peek_memory (inbuf, 0));
  if (fd < 0)
    return FALSE;

  if (!plugin_update_sinkpad_info_from_buffer (plugin, inbuf))
    goto error_update_sinkpad_info;

  meta = gst_buffer_get_vaapi_video_meta (outbuf);
  g_return_val_if_fail (meta != NULL, FALSE);

  /* Check for a VASurface cached in the buffer */
  surface = _get_cached_surface (inbuf);
  if (!surface) {
    /* otherwise create one and cache it */
    surface =
        gst_vaapi_surface_new_with_dma_buf_handle (plugin->display, fd, vip);
    if (!surface)
      goto error_create_surface;
    _set_cached_surface (inbuf, surface);
  }

  proxy = gst_vaapi_surface_proxy_new (surface);
  if (!proxy)
    goto error_create_proxy;

  gst_vaapi_video_meta_set_surface_proxy (meta, proxy);
  gst_vaapi_surface_proxy_unref (proxy);

  /* Keep @inbuf (and thus the dma-buf fd) alive as long as @outbuf exists. */
  gst_buffer_add_parent_buffer_meta (outbuf, inbuf);

  return TRUE;

  /* ERRORS */
error_update_sinkpad_info:
  {
    GST_ERROR_OBJECT (plugin,
        "failed to update sink pad video info from video meta");
    return FALSE;
  }
error_create_surface:
  {
    GST_ERROR_OBJECT (plugin,
        "failed to create VA surface from dma_buf handle");
    return FALSE;
  }
error_create_proxy:
  {
    GST_ERROR_OBJECT (plugin,
        "failed to create VA surface proxy from wrapped VA surface");
    return FALSE;
  }
}
/**
 * Pool hook invoked when a buffer returns to the pool. The GstMemory holds a
 * reference on the MMAL buffer header; dropping it here lets the header go
 * back to the (MMAL) pool.
 */
static void
gst_mmal_opaque_buffer_pool_reset_buffer (GstBufferPool * pool,
    GstBuffer * buffer)
{
  GstMemory *opaque_mem = gst_buffer_peek_memory (buffer, 0);

  /* Release the MMAL header reference held by the memory. */
  gst_mmal_opaque_mem_set_mmal_header (opaque_mem, NULL);

  GST_BUFFER_POOL_CLASS (parent_class)->reset_buffer (pool, buffer);
}
/* Pool hook: reset the gfx image memory of a buffer before it re-enters the
 * pool, then chain up to the parent class. */
void
moz_gfx_buffer_pool_reset_buffer (GstBufferPool* aPool, GstBuffer* aBuffer)
{
  GstMemory* memory = gst_buffer_peek_memory(aBuffer, 0);

  NS_ASSERTION(GST_IS_MOZ_GFX_MEMORY_ALLOCATOR(memory->allocator),
               "Should be a gfx image");

  moz_gfx_memory_reset((MozGfxMemory *) memory);

  GST_BUFFER_POOL_CLASS(moz_gfx_buffer_pool_parent_class)->reset_buffer(
      aPool, aBuffer);
}
/* Pool hook: drop the surface proxy held by VA-API video memory when the
 * buffer is returned to the pool, then chain up. */
static void
gst_vaapi_video_buffer_pool_reset_buffer (GstBufferPool * pool,
    GstBuffer * buffer)
{
  GstMemory *mem;

  mem = gst_buffer_peek_memory (buffer, 0);

  /* Release the underlying surface proxy */
  if (GST_VAAPI_IS_VIDEO_MEMORY (mem))
    gst_vaapi_video_memory_reset_surface (GST_VAAPI_VIDEO_MEMORY_CAST (mem));

  GST_BUFFER_POOL_CLASS
      (gst_vaapi_video_buffer_pool_parent_class)->reset_buffer (pool, buffer);
}
/* Returns TRUE iff @buf has at least one memory block and its first block is
 * dmabuf-backed. */
static gboolean
is_dma_buffer (GstBuffer * buf)
{
  GstMemory *first;

  if (gst_buffer_n_memory (buf) < 1)
    return FALSE;

  first = gst_buffer_peek_memory (buf, 0);
  return first != NULL && gst_is_dmabuf_memory (first);
}
/* Detaches the VLC picture from @p_buffer: releases the picture held by the
 * first plane memory (if any) and clears the picture/plane pointers on every
 * plane so the buffer no longer references it. */
void gst_vlc_picture_plane_allocator_release(
        GstVlcPicturePlaneAllocator *p_allocator, GstBuffer *p_buffer )
{
    GstVlcPicturePlane* p_plane_mem;
    guint i_plane;
    guint i_n_planes;

    VLC_UNUSED( p_allocator );

    /* All planes share the same picture; checking the first is enough. */
    p_plane_mem = (GstVlcPicturePlane*) gst_buffer_peek_memory( p_buffer, 0 );
    if( !p_plane_mem->p_pic )
        return;

    picture_Release( p_plane_mem->p_pic );

    i_n_planes = gst_buffer_n_memory( p_buffer );
    for( i_plane = 0; i_plane < i_n_planes; i_plane++ )
    {
        p_plane_mem = (GstVlcPicturePlane*) gst_buffer_peek_memory( p_buffer,
                i_plane );
        p_plane_mem->p_pic = NULL;
        p_plane_mem->p_plane = NULL;
    }
}
/* Ensures the meta's surface proxy is usable: fails if there is no proxy,
 * and when the meta is backed by VA-API video memory, syncs that memory
 * before use. Metas without a buffer (or with non-VA memory) need no sync. */
static gboolean
ensure_surface_proxy (GstVaapiVideoMeta * meta)
{
  GstMemory *mem;

  if (!meta->proxy)
    return FALSE;
  if (!meta->buffer)
    return TRUE;

  mem = gst_buffer_peek_memory (meta->buffer, 0);
  if (!GST_VAAPI_IS_VIDEO_MEMORY (mem))
    return TRUE;

  return gst_vaapi_video_memory_sync (GST_VAAPI_VIDEO_MEMORY_CAST (mem));
}
/* GstVideoMeta unmap hook for Mali EGLImage-backed memory.
 *
 * Decrements the per-plane map refcount and, on the last unmap of a plane,
 * releases the Mali EGL image mapping for that plane. Buffers that do not
 * consist of exactly one EGLImage memory fall back to the default unmap.
 */
static gboolean
eglimage_video_unmap (GstVideoMeta * meta, guint plane, GstMapInfo * info)
{
  GstMemory *gmem;
  GstEGLImageMemory *mem;
  /* Attributes used for the Mali image unmap call below. */
  EGLint attribs[] = {
    MALI_EGL_IMAGE_PLANE, MALI_EGL_IMAGE_PLANE_Y,
    EGL_NONE
  };

  if (gst_buffer_n_memory (meta->buffer) != 1)
    return default_unmap_video (meta, plane, info);

  gmem = gst_buffer_peek_memory (meta->buffer, 0);
  if (strcmp (gmem->allocator->mem_type, GST_EGL_IMAGE_MEMORY_NAME) != 0)
    return default_unmap_video (meta, plane, info);

  /* Shared mapping state lives on the root memory, not on sub-memories. */
  mem = GST_EGL_IMAGE_MEMORY ((gmem->parent ? gmem->parent : gmem));

  g_mutex_lock (&mem->lock);

  /* YV12 stores its chroma planes in the opposite order to I420; swap the
   * plane index so the refcounts line up with the map call. */
  if (mem->format == GST_VIDEO_FORMAT_YV12) {
    if (plane == 1)
      plane = 2;
    else if (plane == 2)
      plane = 1;
  }

  /* Unmapping a plane that was never mapped is a caller bug. */
  if (!mem->memory_refcount[plane]) {
    g_mutex_unlock (&mem->lock);
    g_return_val_if_reached (FALSE);
  }

  mem->memory_refcount[plane]--;
  if (mem->memory_refcount[plane] > 0) {
    /* Other maps still outstanding; keep the mapping alive. */
    g_mutex_unlock (&mem->lock);
    return TRUE;
  }

  /* Unmaps automatically */
  if (mem->memory_platform_data[plane]) {
    mali_egl_image_unmap_buffer (mem->image[plane], attribs);
    mali_egl_image_unlock_ptr (mem->image[plane]);
  }
  mem->memory[plane] = NULL;
  mem->memory_platform_data[plane] = NULL;

  g_mutex_unlock (&mem->lock);

  return TRUE;
}
/* Checks whether @buffer still wraps an intact, writable V4L2 memory group
 * from our allocator (possibly behind a dmabuf wrapper, in which case the
 * V4L2 memory is retrieved from the memory's qdata).
 *
 * On success optionally returns the group in @out_group. A buffer whose
 * memory layout was altered (TAG_MEMORY flag, plane mismatch, or any plane
 * no longer writable) is rejected.
 */
static gboolean
gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group)
{
  GstMemory *mem = gst_buffer_peek_memory (buffer, 0);
  gboolean valid = FALSE;

  /* TAG_MEMORY means someone replaced/modified the buffer's memory. */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY))
    goto done;

  /* For exported dmabufs the real V4L2 memory is stashed in qdata. */
  if (gst_is_dmabuf_memory (mem))
    mem = gst_mini_object_get_qdata (GST_MINI_OBJECT (mem),
        GST_V4L2_MEMORY_QUARK);

  if (mem && gst_is_v4l2_memory (mem)) {
    GstV4l2Memory *vmem = (GstV4l2Memory *) mem;
    GstV4l2MemoryGroup *group = vmem->group;
    gint i;

    /* Every plane of the group must still be present, in order, writable. */
    if (group->n_mem != gst_buffer_n_memory (buffer))
      goto done;

    for (i = 0; i < group->n_mem; i++) {
      if (group->mem[i] != gst_buffer_peek_memory (buffer, i))
        goto done;

      if (!gst_memory_is_writable (group->mem[i]))
        goto done;
    }

    valid = TRUE;
    if (out_group)
      *out_group = group;
  }

done:
  return valid;
}
/* Binds the dma-buf backing @inbuf to the VA-API meta of @outbuf by creating
 * a new VA surface around the dma-buf fd (no caching in this variant).
 *
 * The surface proxy gets a destroy notify holding a ref on @inbuf, so the
 * dma-buf stays alive for the proxy's lifetime.
 *
 * Returns TRUE on success, FALSE on any failure (logged).
 */
static gboolean
plugin_bind_dma_to_vaapi_buffer (GstVaapiPluginBase * plugin,
    GstBuffer * inbuf, GstBuffer * outbuf)
{
  GstVideoInfo *const vip = &plugin->sinkpad_info;
  GstVaapiVideoMeta *meta;
  GstVaapiSurface *surface;
  GstVaapiSurfaceProxy *proxy;
  gint fd;

  fd = gst_dmabuf_memory_get_fd (gst_buffer_peek_memory (inbuf, 0));
  if (fd < 0)
    return FALSE;

  if (!plugin_update_sinkpad_info_from_buffer (plugin, inbuf))
    goto error_update_sinkpad_info;

  meta = gst_buffer_get_vaapi_video_meta (outbuf);
  g_return_val_if_fail (meta != NULL, FALSE);

  surface = gst_vaapi_surface_new_with_dma_buf_handle (plugin->display, fd,
      GST_VIDEO_INFO_SIZE (vip), GST_VIDEO_INFO_FORMAT (vip),
      GST_VIDEO_INFO_WIDTH (vip), GST_VIDEO_INFO_HEIGHT (vip),
      vip->offset, vip->stride);
  if (!surface)
    goto error_create_surface;

  /* The proxy takes its own ref on the surface; drop ours. */
  proxy = gst_vaapi_surface_proxy_new (surface);
  gst_vaapi_object_unref (surface);
  if (!proxy)
    goto error_create_proxy;

  /* Keep @inbuf alive until the proxy is destroyed. */
  gst_vaapi_surface_proxy_set_destroy_notify (proxy,
      (GDestroyNotify) gst_buffer_unref, (gpointer) gst_buffer_ref (inbuf));
  gst_vaapi_video_meta_set_surface_proxy (meta, proxy);
  gst_vaapi_surface_proxy_unref (proxy);

  return TRUE;

  /* ERRORS */
error_update_sinkpad_info:
  GST_ERROR ("failed to update sink pad video info from video meta");
  return FALSE;
error_create_surface:
  GST_ERROR ("failed to create VA surface from dma_buf handle");
  return FALSE;
error_create_proxy:
  GST_ERROR ("failed to create VA surface proxy from wrapped VA surface");
  return FALSE;
}
/* Update the buffer used to draw black borders. When we have viewporter
 * support, this is a scaled up 1x1 image, and without we need a black image
 * the size of the rendering area. */
static void
gst_wl_window_update_borders (GstWlWindow * window)
{
  GstVideoFormat format;
  GstVideoInfo info;
  gint width, height;
  GstBuffer *buf;
  struct wl_buffer *wlbuf;
  GstWlBuffer *gwlbuf;
  GstAllocator *alloc;

  if (window->no_border_update)
    return;

  if (window->display->viewporter) {
    /* A 1x1 buffer scaled by the viewporter never needs re-creating. */
    width = height = 1;
    window->no_border_update = TRUE;
  } else {
    width = window->render_rectangle.w;
    height = window->render_rectangle.h;
  }

  /* we want WL_SHM_FORMAT_XRGB8888 */
#if G_BYTE_ORDER == G_BIG_ENDIAN
  format = GST_VIDEO_FORMAT_xRGB;
#else
  format = GST_VIDEO_FORMAT_BGRx;
#endif

  /* draw the area_subsurface */
  gst_video_info_set_format (&info, format, width, height);

  alloc = gst_wl_shm_allocator_get ();

  buf = gst_buffer_new_allocate (alloc, info.size, NULL);
  /* zero-filled XRGB is black */
  gst_buffer_memset (buf, 0, 0, info.size);
  wlbuf =
      gst_wl_shm_memory_construct_wl_buffer (gst_buffer_peek_memory (buf, 0),
      window->display, &info);
  gwlbuf = gst_buffer_add_wl_buffer (buf, wlbuf, window->display);
  gst_wl_buffer_attach (gwlbuf, window->area_surface_wrapper);

  /* at this point, the GstWlBuffer keeps the buffer
   * alive and will free it on wl_buffer::release */
  gst_buffer_unref (buf);
  g_object_unref (alloc);
}
/* Downloads the contents of the input texture into the caller-provided plane
 * pointers in @data, by running the GL color converter and temporarily
 * swapping each output GstGLMemory's data pointer to the caller's buffer so
 * that mapping for READ writes straight into it.
 *
 * Returns TRUE on success, FALSE if init, conversion or any map fails.
 *
 * FIXES over the previous version:
 *  - gst_memory_unmap() was called even when gst_memory_map() had failed;
 *  - @inbuf was leaked when gst_gl_color_convert_perform() returned NULL.
 */
static gboolean
_do_download (GstGLDownload * download, guint texture_id,
    gpointer data[GST_VIDEO_MAX_PLANES])
{
  guint out_width, out_height;
  GstBuffer *inbuf, *outbuf;
  GstMapInfo map_info;
  gboolean ret = TRUE;
  gint i;

  out_width = GST_VIDEO_INFO_WIDTH (&download->info);
  out_height = GST_VIDEO_INFO_HEIGHT (&download->info);

  if (!download->initted) {
    if (!_init_download (download))
      return FALSE;
  }

  GST_TRACE ("doing download of texture:%u (%ux%u)",
      download->priv->in_tex[0]->tex_id, out_width, out_height);

  /* Wrap the input texture memory in a temporary buffer for the converter. */
  inbuf = gst_buffer_new ();
  gst_buffer_append_memory (inbuf,
      gst_memory_ref ((GstMemory *) download->priv->in_tex[0]));

  outbuf = gst_gl_color_convert_perform (download->convert, inbuf);
  if (!outbuf) {
    /* FIX: inbuf used to leak on this path */
    gst_buffer_unref (inbuf);
    return FALSE;
  }

  for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&download->info); i++) {
    GstMemory *out_mem = gst_buffer_peek_memory (outbuf, i);
    gpointer temp_data = ((GstGLMemory *) out_mem)->data;

    /* Point the GL memory at the caller's plane buffer so the READ map
     * downloads directly into it, then restore the original pointer. */
    ((GstGLMemory *) out_mem)->data = data[i];

    if (!gst_memory_map (out_mem, &map_info, GST_MAP_READ)) {
      GST_ERROR_OBJECT (download, "Failed to map memory");
      ret = FALSE;
    } else {
      /* FIX: only unmap after a successful map */
      gst_memory_unmap (out_mem, &map_info);
    }
    ((GstGLMemory *) out_mem)->data = temp_data;
  }

  gst_buffer_unref (inbuf);
  gst_buffer_unref (outbuf);

  return ret;
}
/* Copies the contents of aBuffer into a freshly allocated buffer from our
 * allocator (so the data lives in image-compatible memory), copies the
 * metadata over, and extracts a PlanarYCbCrImage from the result.
 *
 * FIX over the previous version: the return value of gst_memory_map() was
 * ignored, so a failed mapping led to writing through an invalid map. */
void GStreamerReader::CopyIntoImageBuffer(GstBuffer* aBuffer,
                                          GstBuffer** aOutBuffer,
                                          nsRefPtr<PlanarYCbCrImage> &image)
{
  gsize size = gst_buffer_get_size(aBuffer);

  *aOutBuffer = gst_buffer_new_allocate(mAllocator, size, nullptr);

  GstMemory *mem = gst_buffer_peek_memory(*aOutBuffer, 0);
  GstMapInfo map_info;
  if (gst_memory_map(mem, &map_info, GST_MAP_WRITE)) {
    gst_buffer_extract(aBuffer, 0, map_info.data, size);
    gst_memory_unmap(mem, &map_info);
  }

  /* create a new gst buffer with the newly created memory and copy the
   * metadata over from the incoming buffer */
  gst_buffer_copy_into(*aOutBuffer, aBuffer,
      (GstBufferCopyFlags)(GST_BUFFER_COPY_METADATA), 0, -1);
  image = GetImageFromBuffer(*aOutBuffer);
}
/* Imports the dmabuf memories of @src into the V4L2 memory group backing
 * @dest, and keeps @src alive (via qdata on @dest) for as long as @dest
 * references its dmabufs.
 *
 * FIX over the previous version: the too_many_mems error path logged
 * "could not map buffer", a copy-pasted message that did not describe the
 * actual failure.
 */
static GstFlowReturn
gst_v4l2_buffer_pool_import_dmabuf (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstV4l2MemoryGroup *group = NULL;
  GstMemory *dma_mem[GST_VIDEO_MAX_PLANES] = { 0 };
  guint n_mem = gst_buffer_n_memory (src);
  guint i;

  GST_LOG_OBJECT (pool, "importing dmabuf");

  if (!gst_v4l2_is_buffer_valid (dest, &group))
    goto not_our_buffer;

  if (n_mem > GST_VIDEO_MAX_PLANES)
    goto too_many_mems;

  for (i = 0; i < n_mem; i++)
    dma_mem[i] = gst_buffer_peek_memory (src, i);

  if (!gst_v4l2_allocator_import_dmabuf (pool->vallocator, group, n_mem,
          dma_mem))
    goto import_failed;

  /* Hold a ref on @src for the lifetime of @dest's imported memories. */
  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
      gst_buffer_ref (src), (GDestroyNotify) gst_buffer_unref);

  return GST_FLOW_OK;

not_our_buffer:
  {
    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
    return GST_FLOW_ERROR;
  }
too_many_mems:
  {
    GST_ERROR_OBJECT (pool, "buffer has too many memory blocks");
    return GST_FLOW_ERROR;
  }
import_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to import dmabuf");
    return GST_FLOW_ERROR;
  }
}
/* Takes ownership of the incoming buffer's EGLImage memory (dropping the
 * previous frame's memory) and binds its EGLImage to our GL texture. */
static void
update_image (APP_STATE_T * state, GstBuffer * buffer)
{
  GstMemory *new_mem = gst_buffer_peek_memory (buffer, 0);

  /* Swap the held memory reference: new one in, old one out. */
  if (state->current_mem)
    gst_memory_unref (state->current_mem);
  state->current_mem = gst_memory_ref (new_mem);

  TRACE_VC_MEMORY_ONCE_FOR_ID ("before glEGLImageTargetTexture2DOES", gid0);

  glBindTexture (GL_TEXTURE_2D, state->tex);
  glEGLImageTargetTexture2DOES (GL_TEXTURE_2D,
      gst_egl_image_memory_get_image (new_mem));

  TRACE_VC_MEMORY_ONCE_FOR_ID ("after glEGLImageTargetTexture2DOES", gid1);
}
/* Releases a buffer back to the pool; if the buffer carries Mir image
 * memory, the corresponding output buffer is first rendered/released back to
 * the media codec.
 *
 * FIX over the previous version: when the buffer had no memory or its memory
 * was not Mir image memory, the code only logged a debug message and then
 * dereferenced @mem anyway (possibly NULL). It now skips the codec release
 * entirely in that case.
 */
static void
gst_mir_buffer_pool_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
#if 1
  GstMemory *mem = NULL;
  int err = 0;
  MediaCodecDelegate delegate;

  /* Get access to the GstMemory stored in the GstBuffer */
  if (gst_buffer_n_memory (buffer) >= 1 &&
      (mem = gst_buffer_peek_memory (buffer, 0)) &&
      gst_is_mir_image_memory (mem)) {
    GST_DEBUG_OBJECT (pool, "It is Mir image memory");
  } else {
    GST_DEBUG_OBJECT (pool, "It is NOT Mir image memory");
    /* Nothing to hand back to the codec for non-Mir (or missing) memory. */
    goto done;
  }

  delegate = gst_mir_image_memory_get_codec (mem);
  if (!delegate) {
    GST_WARNING_OBJECT (pool, "delegate is NULL, rendering will not function");
    goto done;
  }

  GST_DEBUG_OBJECT (pool, "mem: %p", mem);
  GST_DEBUG_OBJECT (pool, "gst_mir_image_memory_get_codec (mem): %p",
      delegate);
  GST_DEBUG_OBJECT (pool, "gst_mir_image_memory_get_buffer_index (mem): %d",
      gst_mir_image_memory_get_buffer_index (mem));
  GST_DEBUG_OBJECT (pool, "Rendering buffer: %d",
      gst_mir_image_memory_get_buffer_index (mem));
  GST_DEBUG_OBJECT (pool, "Releasing output buffer index: %d",
      gst_mir_image_memory_get_buffer_index (mem));

  /* Render and release the output buffer back to the decoder */
  err = media_codec_release_output_buffer (delegate,
      gst_mir_image_memory_get_buffer_index (mem));
  if (err < 0)
    GST_WARNING_OBJECT (pool,
        "Failed to release output buffer. Rendering will probably be affected (err: %d).",
        err);
#endif

done:
  GST_BUFFER_POOL_CLASS (parent_class)->release_buffer (pool, buffer);
}
/* Extracts a PlanarYCbCrImage from aBuffer when the buffer consists of a
 * single memory block allocated by our gfx allocator; returns nullptr
 * otherwise. The video frame is mapped read-only just long enough to fill
 * the image data (no pixel copy is made). */
nsRefPtr<PlanarYCbCrImage> GStreamerReader::GetImageFromBuffer(GstBuffer* aBuffer)
{
  nsRefPtr<PlanarYCbCrImage> image = nullptr;

  if (gst_buffer_n_memory(aBuffer) != 1) {
    return image;
  }

  GstMemory* mem = gst_buffer_peek_memory(aBuffer, 0);
  if (!GST_IS_MOZ_GFX_MEMORY_ALLOCATOR(mem->allocator)) {
    return image;
  }

  image = moz_gfx_memory_get_image(mem);

  GstVideoFrame frame;
  gst_video_frame_map(&frame, &mVideoInfo, aBuffer, GST_MAP_READ);
  PlanarYCbCrImage::Data data;
  ImageDataFromVideoFrame(&frame, &data);
  image->SetDataNoCopy(data);
  gst_video_frame_unmap(&frame);

  return image;
}
/* GstVideoGLTextureUploadMeta upload function for EGLImage-backed buffers.
 *
 * Binds the EGLImage of every memory block in the meta's buffer to the
 * corresponding caller-provided texture (one texture unit per plane).
 *
 * Returns TRUE on success, FALSE if any memory is not an EGLImage.
 */
static gboolean
gst_eglimage_to_gl_texture_upload_meta (GstVideoGLTextureUploadMeta *
    meta, guint texture_id[4])
{
  gint i = 0;
  gint n = 0;

  g_return_val_if_fail (meta != NULL, FALSE);
  g_return_val_if_fail (texture_id != NULL, FALSE);

  GST_DEBUG ("Uploading for meta with textures %i,%i,%i,%i", texture_id[0],
      texture_id[1], texture_id[2], texture_id[3]);

  n = gst_buffer_n_memory (meta->buffer);
  for (i = 0; i < n; i++) {
    GstMemory *mem = gst_buffer_peek_memory (meta->buffer, i);
    const GstGLFuncs *gl = NULL;

    if (!gst_is_egl_image_memory (mem)) {
      GST_WARNING ("memory %p does not hold an EGLImage", mem);
      return FALSE;
    }

    gl = GST_GL_CONTEXT (GST_EGL_IMAGE_MEMORY (mem)->context)->gl_vtable;

    /* Bind each plane's EGLImage on its own texture unit. */
    gl->ActiveTexture (GL_TEXTURE0 + i);
    gl->BindTexture (GL_TEXTURE_2D, texture_id[i]);
    gl->EGLImageTargetTexture2D (GL_TEXTURE_2D,
        gst_egl_image_memory_get_image (mem));
  }

  /* Keep the buffer alive in the pool while its EGLImage is bound. */
  if (GST_IS_GL_BUFFER_POOL (meta->buffer->pool))
    gst_gl_buffer_pool_replace_last_buffer (GST_GL_BUFFER_POOL (meta->
            buffer->pool), meta->buffer);
  return TRUE;
}
/* Maps every memory block of @buf for reading and fills @vecs/@maps with the
 * mapped regions (one iovec per block). Blocks that fail to map yield an
 * empty iovec. Returns the total number of readable bytes. @n must equal
 * the buffer's memory count; the caller later unmaps via @maps. */
static gsize
fill_vectors (struct iovec *vecs, GstMapInfo * maps, guint n, GstBuffer * buf)
{
  gsize total = 0;
  guint idx;

  g_assert (gst_buffer_n_memory (buf) == n);

  for (idx = 0; idx < n; ++idx) {
    GstMemory *mem = gst_buffer_peek_memory (buf, idx);

    if (!gst_memory_map (mem, &maps[idx], GST_MAP_READ)) {
      GST_WARNING ("Failed to map memory %p for reading", mem);
      /* Empty-but-valid iovec so writev() callers can proceed. */
      vecs[idx].iov_base = (void *) "";
      vecs[idx].iov_len = 0;
    } else {
      vecs[idx].iov_base = maps[idx].data;
      vecs[idx].iov_len = maps[idx].size;
    }
    total += vecs[idx].iov_len;
  }

  return total;
}
/* Obtains an MsdkSurface for @inbuf:
 *  1. If @inbuf already is an msdk buffer, wrap its surface directly.
 *  2. Otherwise take a surface from our sinkpad pool and, if @inbuf is
 *     dmabuf-backed (non-Windows), try to import the dmabuf into it.
 *  3. As a last resort, copy the frame into the pool surface.
 *
 * Returns NULL on failure.
 *
 * FIX over the previous version: the fallback log message was built from two
 * adjacent string literals with no separating space, producing
 * "...importedto the msdk surface...".
 */
static MsdkSurface *
get_msdk_surface_from_input_buffer (GstMsdkVPP * thiz, GstBuffer * inbuf)
{
  GstVideoFrame src_frame, out_frame;
  MsdkSurface *msdk_surface;
  GstMemory *mem = NULL;

  if (gst_msdk_is_msdk_buffer (inbuf)) {
    msdk_surface = g_slice_new0 (MsdkSurface);
    msdk_surface->surface = gst_msdk_get_surface_from_buffer (inbuf);
    msdk_surface->buf = gst_buffer_ref (inbuf);
    return msdk_surface;
  }

  /* If upstream hasn't accepted the proposed msdk bufferpool,
   * just copy frame (if not dmabuf backed) to msdk buffer and
   * take a surface from it.   */
  if (!(msdk_surface =
          get_surface_from_pool (thiz, thiz->sinkpad_buffer_pool, NULL)))
    goto error;

#ifndef _WIN32
  /************ dmabuf-import ************* */
  /* if upstream provided a dmabuf backed memory, but not an msdk
   * buffer, we could export the dmabuf to underlined vasurface */
  mem = gst_buffer_peek_memory (inbuf, 0);
  if (gst_is_dmabuf_memory (mem)) {
    if (import_dmabuf_to_msdk_surface (thiz, inbuf, msdk_surface))
      return msdk_surface;
    else
      GST_INFO_OBJECT (thiz, "Upstream dmabuf-backed memory is not imported "
          "to the msdk surface, fall back to the copy input frame method");
  }
#endif

  /* Copy fallback: map source and destination, blit, unmap. */
  if (!gst_video_frame_map (&src_frame, &thiz->sinkpad_info, inbuf,
          GST_MAP_READ)) {
    GST_ERROR_OBJECT (thiz, "failed to map the frame for source");
    goto error;
  }

  if (!gst_video_frame_map (&out_frame, &thiz->sinkpad_buffer_pool_info,
          msdk_surface->buf, GST_MAP_WRITE)) {
    GST_ERROR_OBJECT (thiz, "failed to map the frame for destination");
    gst_video_frame_unmap (&src_frame);
    goto error;
  }

  if (!gst_video_frame_copy (&out_frame, &src_frame)) {
    GST_ERROR_OBJECT (thiz, "failed to copy frame");
    gst_video_frame_unmap (&out_frame);
    gst_video_frame_unmap (&src_frame);
    goto error;
  }

  gst_video_frame_unmap (&out_frame);
  gst_video_frame_unmap (&src_frame);

  return msdk_surface;

error:
  return NULL;
}
/* Imports the dmabuf backing @buf into @msdk_surface's underlying VA
 * surface, avoiding a frame copy.
 *
 * Any GstVideoMeta on @buf overrides offsets/strides/size, but must agree
 * with the negotiated format/size. The buffer must be at least as large as
 * the pool's expectation (see the media-driver issue below) or we bail out
 * so the caller falls back to the copy path.
 *
 * FIX over the previous version: the VideoMeta-mismatch error message was
 * built from two adjacent string literals with no separating space,
 * producing "...is not matchingthe negotiated...".
 */
static gboolean
import_dmabuf_to_msdk_surface (GstMsdkVPP * thiz, GstBuffer * buf,
    MsdkSurface * msdk_surface)
{
  GstMemory *mem = NULL;
  GstVideoInfo vinfo;
  GstVideoMeta *vmeta;
  GstMsdkMemoryID *msdk_mid = NULL;
  mfxFrameSurface1 *mfx_surface = NULL;
  gint fd, i;

  mem = gst_buffer_peek_memory (buf, 0);
  fd = gst_dmabuf_memory_get_fd (mem);
  if (fd < 0)
    return FALSE;

  vinfo = thiz->sinkpad_info;

  /* Update offset/stride/size if there is VideoMeta attached to
   * the buffer */
  vmeta = gst_buffer_get_video_meta (buf);
  if (vmeta) {
    if (GST_VIDEO_INFO_FORMAT (&vinfo) != vmeta->format ||
        GST_VIDEO_INFO_WIDTH (&vinfo) != vmeta->width ||
        GST_VIDEO_INFO_HEIGHT (&vinfo) != vmeta->height ||
        GST_VIDEO_INFO_N_PLANES (&vinfo) != vmeta->n_planes) {
      GST_ERROR_OBJECT (thiz, "VideoMeta attached to buffer is not matching "
          "the negotiated width/height/format");
      return FALSE;
    }
    for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&vinfo); ++i) {
      GST_VIDEO_INFO_PLANE_OFFSET (&vinfo, i) = vmeta->offset[i];
      GST_VIDEO_INFO_PLANE_STRIDE (&vinfo, i) = vmeta->stride[i];
    }
    GST_VIDEO_INFO_SIZE (&vinfo) = gst_buffer_get_size (buf);
  }

  /* Upstream neither accepted the msdk pool nor the msdk buffer size restrictions.
   * Current media-driver and GMMLib will fail due to strict memory size restrictions.
   * Ideally, media-driver should accept what ever memory coming from other drivers
   * in case of dmabuf-import and this is how the intel-vaapi-driver works.
   * For now, in order to avoid any crash we check the buffer size and fallback
   * to copy frame method.
   *
   * See this: https://github.com/intel/media-driver/issues/169
   * */
  if (GST_VIDEO_INFO_SIZE (&vinfo) <
      GST_VIDEO_INFO_SIZE (&thiz->sinkpad_buffer_pool_info))
    return FALSE;

  mfx_surface = msdk_surface->surface;
  msdk_mid = (GstMsdkMemoryID *) mfx_surface->Data.MemId;

  /* release the internal memory storage of associated mfxSurface */
  gst_msdk_replace_mfx_memid (thiz->context, mfx_surface, VA_INVALID_ID);

  /* export dmabuf to vasurface */
  if (!gst_msdk_export_dmabuf_to_vasurface (thiz->context, &vinfo, fd,
          msdk_mid->surface))
    return FALSE;

  return TRUE;
}
/* Renders @buf by sending it over the shared-memory pipe.
 *
 * If the buffer's single memory already comes from our shm allocator it is
 * sent as-is; otherwise the data is copied into freshly allocated shm
 * memory. Both the connection-wait loops and the allocation loop honor
 * unlock/preroll requests. Called with the object lock taken and released
 * around blocking waits.
 *
 * FIXES over the previous version:
 *  - the "too large" error message was built from adjacent string literals
 *    with no separating space ("...smaller thanbuffer...");
 *  - the freshly allocated shm memory leaked when gst_memory_map() or
 *    gst_buffer_copy_into() failed;
 *  - @sendbuf leaked when the final gst_buffer_map() failed.
 */
static GstFlowReturn
gst_shm_sink_render (GstBaseSink * bsink, GstBuffer * buf)
{
  GstShmSink *self = GST_SHM_SINK (bsink);
  int rv = 0;
  GstMapInfo map;
  gboolean need_new_memory = FALSE;
  GstFlowReturn ret = GST_FLOW_OK;
  GstMemory *memory = NULL;
  GstBuffer *sendbuf = NULL;
  gsize written_bytes;

  GST_OBJECT_LOCK (self);
  /* Optionally wait until at least one client is connected. */
  while (self->wait_for_connection && !self->clients) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock) {
      GST_OBJECT_UNLOCK (self);
      ret = gst_base_sink_wait_preroll (bsink);
      if (ret == GST_FLOW_OK)
        GST_OBJECT_LOCK (self);
      else
        return ret;
    }
  }

  /* Wait until the pipe can accept this buffer. */
  while (!gst_shm_sink_can_render (self, GST_BUFFER_TIMESTAMP (buf))) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock) {
      GST_OBJECT_UNLOCK (self);
      ret = gst_base_sink_wait_preroll (bsink);
      if (ret == GST_FLOW_OK)
        GST_OBJECT_LOCK (self);
      else
        return ret;
    }
  }

  if (gst_buffer_n_memory (buf) > 1) {
    GST_LOG_OBJECT (self, "Buffer %p has %d GstMemory, we only support a single"
        " one, need to do a memcpy", buf, gst_buffer_n_memory (buf));
    need_new_memory = TRUE;
  } else {
    memory = gst_buffer_peek_memory (buf, 0);

    if (memory->allocator != GST_ALLOCATOR (self->allocator)) {
      need_new_memory = TRUE;
      GST_LOG_OBJECT (self, "Memory in buffer %p was not allocated by "
          "%" GST_PTR_FORMAT ", will memcpy", buf, memory->allocator);
    }
  }

  if (need_new_memory) {
    if (gst_buffer_get_size (buf) > sp_writer_get_max_buf_size (self->pipe)) {
      gsize area_size = sp_writer_get_max_buf_size (self->pipe);
      GST_ELEMENT_ERROR (self, RESOURCE, NO_SPACE_LEFT, (NULL),
          ("Shared memory area of size %" G_GSIZE_FORMAT " is smaller than "
              "buffer of size %" G_GSIZE_FORMAT, area_size,
              gst_buffer_get_size (buf)));
      goto error;
    }

    /* Block until shm memory becomes available, honoring unlock. */
    while ((memory =
            gst_shm_sink_allocator_alloc_locked (self->allocator,
                gst_buffer_get_size (buf), &self->params)) == NULL) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        GST_OBJECT_UNLOCK (self);
        ret = gst_base_sink_wait_preroll (bsink);
        if (ret == GST_FLOW_OK)
          GST_OBJECT_LOCK (self);
        else
          return ret;
      }
    }

    while (self->wait_for_connection && !self->clients) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        GST_OBJECT_UNLOCK (self);
        ret = gst_base_sink_wait_preroll (bsink);
        if (ret == GST_FLOW_OK) {
          GST_OBJECT_LOCK (self);
        } else {
          gst_memory_unref (memory);
          return ret;
        }
      }
    }

    if (!gst_memory_map (memory, &map, GST_MAP_WRITE)) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
          ("Failed to map memory"));
      /* FIX: memory used to leak on this path */
      gst_memory_unref (memory);
      goto error;
    }

    GST_DEBUG_OBJECT (self,
        "Copying %" G_GSIZE_FORMAT " bytes into map of size %" G_GSIZE_FORMAT
        " bytes.", gst_buffer_get_size (buf), map.size);
    written_bytes = gst_buffer_extract (buf, 0, map.data, map.size);
    GST_DEBUG_OBJECT (self, "Copied %" G_GSIZE_FORMAT " bytes.",
        written_bytes);
    gst_memory_unmap (memory, &map);

    sendbuf = gst_buffer_new ();
    if (!gst_buffer_copy_into (sendbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1)) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
          ("Failed to copy data into send buffer"));
      gst_buffer_unref (sendbuf);
      /* FIX: memory was not yet owned by sendbuf and used to leak here */
      gst_memory_unref (memory);
      goto error;
    }
    gst_buffer_append_memory (sendbuf, memory);
  } else {
    sendbuf = gst_buffer_ref (buf);
  }

  if (!gst_buffer_map (sendbuf, &map, GST_MAP_READ)) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
        ("Failed to map data into send buffer"));
    /* FIX: sendbuf used to leak on this path */
    gst_buffer_unref (sendbuf);
    goto error;
  }

  /* Make the memory readonly as of now as we've sent it to the other side
   * We know it's not mapped for writing anywhere as we just mapped it for
   * reading */
  rv = sp_writer_send_buf (self->pipe, (char *) map.data, map.size, sendbuf);
  if (rv == -1) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
        ("Failed to send data over SHM"));
    gst_buffer_unmap (sendbuf, &map);
    goto error;
  }

  gst_buffer_unmap (sendbuf, &map);

  GST_OBJECT_UNLOCK (self);

  if (rv == 0) {
    GST_DEBUG_OBJECT (self, "No clients connected, unreffing buffer");
    gst_buffer_unref (sendbuf);
  }

  return ret;

error:
  GST_OBJECT_UNLOCK (self);
  return GST_FLOW_ERROR;
}