/* Chain list function for testing buffer lists: accumulates the payload
 * size (last memory block of each buffer) into chain_list_bytes_received. */
static GstFlowReturn
rtp_pipeline_chain_list (GstPad * pad, GstObject * parent, GstBufferList * list)
{
  guint idx, n_bufs;

  fail_if (!list);

  n_bufs = gst_buffer_list_length (list);

  /* Walk every buffer in the list and count its payload. */
  for (idx = 0; idx < n_bufs; idx++) {
    GstBuffer *buf = gst_buffer_list_get (list, idx);
    guint n_mem = gst_buffer_n_memory (buf);
    GstMemory *payload;
    gint payload_size;

    /* only count real data which is expected in last memory block */
    fail_unless (n_mem > 1);
    payload = gst_buffer_get_memory_range (buf, n_mem - 1, 1);
    payload_size = gst_memory_get_sizes (payload, NULL, NULL);
    gst_memory_unref (payload);

    chain_list_bytes_received += payload_size;
  }

  gst_buffer_list_unref (list);
  return GST_FLOW_OK;
}
/* Write a single buffer to the file; honours GST_BUFFER_FLAG_SYNC_AFTER
 * by flushing and fsync-ing the file after a successful write. */
static GstFlowReturn
gst_file_sink_render (GstBaseSink * sink, GstBuffer * buffer)
{
  GstFileSink *filesink = GST_FILE_SINK_CAST (sink);
  guint8 n_mem = gst_buffer_n_memory (buffer);
  GstFlowReturn flow = GST_FLOW_OK;

  /* A buffer without memory blocks carries no payload to write. */
  if (n_mem > 0)
    flow = gst_file_sink_render_buffers (filesink, &buffer, 1, &n_mem, n_mem);

  /* If requested, make sure the data reaches the disk before continuing. */
  if (flow == GST_FLOW_OK
      && GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_SYNC_AFTER)) {
    if (fflush (filesink->file) || fsync (fileno (filesink->file))) {
      GST_ELEMENT_ERROR (filesink, RESOURCE, WRITE,
          (_("Error while writing to file \"%s\"."), filesink->filename),
          ("%s", g_strerror (errno)));
      flow = GST_FLOW_ERROR;
    }
  }

  return flow;
}
/* Wraps every GstMemory of @buf into a CoreMedia CMBlockBuffer without
 * copying.  Each memory is mapped with @flags and appended as a custom
 * block; the GstMapInfo is handed to CoreMedia via refCon so that
 * cm_block_buffer_freeblock() can presumably unmap and release it when
 * the CMBlockBuffer is destroyed (TODO confirm against the freeblock
 * implementation).  Returns NULL on any failure. */
static CMBlockBufferRef
cm_block_buffer_from_gst_buffer (GstBuffer * buf, GstMapFlags flags)
{
  OSStatus status;
  CMBlockBufferRef bbuf;
  CMBlockBufferCustomBlockSource blockSource;
  guint memcount, i;

  /* Initialize custom block source structure: no allocation (the memory
   * already exists), only a free callback for cleanup. */
  blockSource.version = kCMBlockBufferCustomBlockSourceVersion;
  blockSource.AllocateBlock = NULL;
  blockSource.FreeBlock = cm_block_buffer_freeblock;

  /* Determine number of memory blocks */
  memcount = gst_buffer_n_memory (buf);
  status = CMBlockBufferCreateEmpty (NULL, memcount, 0, &bbuf);
  if (status != kCMBlockBufferNoErr) {
    GST_ERROR ("CMBlockBufferCreateEmpty returned %d", (int) status);
    return NULL;
  }

  /* Go over all GstMemory objects and add them to the CMBlockBuffer */
  for (i = 0; i < memcount; ++i) {
    GstMemory *mem;
    GstMapInfo *info;

    /* gst_buffer_get_memory() takes a ref; on success that ref is kept
     * alive through info->memory until the free callback runs. */
    mem = gst_buffer_get_memory (buf, i);

    info = g_slice_new (GstMapInfo);
    if (!gst_memory_map (mem, info, flags)) {
      GST_ERROR ("failed mapping memory");
      g_slice_free (GstMapInfo, info);
      gst_memory_unref (mem);
      /* Releasing bbuf triggers FreeBlock for already-appended blocks. */
      CFRelease (bbuf);
      return NULL;
    }

    /* refCon is copied into the block on append, so reusing blockSource
     * across iterations is safe. */
    blockSource.refCon = info;
    status = CMBlockBufferAppendMemoryBlock (bbuf, info->data, info->size,
        NULL, &blockSource, 0, info->size, 0);
    if (status != kCMBlockBufferNoErr) {
      GST_ERROR ("CMBlockBufferAppendMemoryBlock returned %d", (int) status);
      gst_memory_unmap (mem, info);
      g_slice_free (GstMapInfo, info);
      gst_memory_unref (mem);
      CFRelease (bbuf);
      return NULL;
    }
  }

  return bbuf;
}
/* Returns TRUE iff @buf has at least one memory and its first memory is
 * dmabuf-backed. */
static gboolean
is_dma_buffer (GstBuffer * buf)
{
  GstMemory *first;

  if (gst_buffer_n_memory (buf) < 1)
    return FALSE;

  first = gst_buffer_peek_memory (buf, 0);
  return first != NULL && gst_is_dmabuf_memory (first);
}
/* GstVideoMeta unmap implementation for Mali EGLImage-backed buffers.
 * Falls back to the default unmap unless the buffer consists of exactly one
 * EGLImage memory.  Per-plane mappings are refcounted under mem->lock; the
 * Mali buffer is only unmapped when the last mapping for a plane is gone. */
static gboolean
eglimage_video_unmap (GstVideoMeta * meta, guint plane, GstMapInfo * info)
{
  GstMemory *gmem;
  GstEGLImageMemory *mem;
  /* Attribute list for the Mali unmap call; always selects the Y plane —
   * presumably each mem->image[plane] is a single-plane image, so the Y
   * selector addresses it regardless of the logical plane (TODO confirm). */
  EGLint attribs[] = {
    MALI_EGL_IMAGE_PLANE, MALI_EGL_IMAGE_PLANE_Y,
    EGL_NONE
  };

  if (gst_buffer_n_memory (meta->buffer) != 1)
    return default_unmap_video (meta, plane, info);

  gmem = gst_buffer_peek_memory (meta->buffer, 0);
  if (strcmp (gmem->allocator->mem_type, GST_EGL_IMAGE_MEMORY_NAME) != 0)
    return default_unmap_video (meta, plane, info);

  /* Shared mapping state lives on the parent memory when this is a
   * sub-memory. */
  mem = GST_EGL_IMAGE_MEMORY ((gmem->parent ? gmem->parent : gmem));

  g_mutex_lock (&mem->lock);

  /* YV12 stores chroma planes in the opposite order; swap U/V indices so
   * the refcount arrays are addressed consistently with map(). */
  if (mem->format == GST_VIDEO_FORMAT_YV12) {
    if (plane == 1)
      plane = 2;
    else if (plane == 2)
      plane = 1;
  }

  /* Unbalanced unmap: this plane was never mapped. */
  if (!mem->memory_refcount[plane]) {
    g_mutex_unlock (&mem->lock);
    g_return_val_if_reached (FALSE);
  }

  mem->memory_refcount[plane]--;
  if (mem->memory_refcount[plane] > 0) {
    /* Other mappings are still active; keep the plane mapped. */
    g_mutex_unlock (&mem->lock);
    return TRUE;
  }

  /* Last mapping released: tear down the Mali mapping.
   * Unmaps automatically */
  if (mem->memory_platform_data[plane]) {
    mali_egl_image_unmap_buffer (mem->image[plane], attribs);
    mali_egl_image_unlock_ptr (mem->image[plane]);
  }
  mem->memory[plane] = NULL;
  mem->memory_platform_data[plane] = NULL;

  g_mutex_unlock (&mem->lock);

  return TRUE;
}
/* Write a whole buffer list to the file in one go, fsync-ing afterwards if
 * any buffer in the list carries GST_BUFFER_FLAG_SYNC_AFTER. */
static GstFlowReturn
gst_file_sink_render_list (GstBaseSink * bsink, GstBufferList * buffer_list)
{
  GstFileSink *sink = GST_FILE_SINK_CAST (bsink);
  GstFlowReturn flow;
  GstBuffer **buffers;
  guint8 *mem_nums;
  guint total_mems = 0;
  guint num_buffers, idx;
  gboolean sync_after = FALSE;

  num_buffers = gst_buffer_list_length (buffer_list);
  if (num_buffers == 0) {
    GST_LOG_OBJECT (sink, "empty buffer list");
    return GST_FLOW_OK;
  }

  /* Stack-allocated scratch arrays: one entry per buffer in the list. */
  buffers = g_newa (GstBuffer *, num_buffers);
  mem_nums = g_newa (guint8, num_buffers);

  for (idx = 0; idx < num_buffers; ++idx) {
    buffers[idx] = gst_buffer_list_get (buffer_list, idx);
    mem_nums[idx] = gst_buffer_n_memory (buffers[idx]);
    total_mems += mem_nums[idx];
    if (GST_BUFFER_FLAG_IS_SET (buffers[idx], GST_BUFFER_FLAG_SYNC_AFTER))
      sync_after = TRUE;
  }

  flow = gst_file_sink_render_buffers (sink, buffers, num_buffers,
      mem_nums, total_mems);

  /* Push the data to disk if any buffer requested it. */
  if (flow == GST_FLOW_OK && sync_after) {
    if (fflush (sink->file) || fsync (fileno (sink->file))) {
      GST_ELEMENT_ERROR (sink, RESOURCE, WRITE,
          (_("Error while writing to file \"%s\"."), sink->filename),
          ("%s", g_strerror (errno)));
      flow = GST_FLOW_ERROR;
    }
  }

  return flow;
}
/* Import the dmabuf memories of @src into the V4L2 memory group backing
 * @dest, keeping @src alive (via qdata) as long as @dest references them. */
static GstFlowReturn
gst_v4l2_buffer_pool_import_dmabuf (GstV4l2BufferPool * pool, GstBuffer * dest,
    GstBuffer * src)
{
  GstMemory *dma_mem[GST_VIDEO_MAX_PLANES] = { 0 };
  GstV4l2MemoryGroup *group = NULL;
  guint mem_count = gst_buffer_n_memory (src);
  gint idx;

  GST_LOG_OBJECT (pool, "importing dmabuf");

  if (!gst_v4l2_is_buffer_valid (dest, &group))
    goto not_our_buffer;

  if (mem_count > GST_VIDEO_MAX_PLANES)
    goto too_many_mems;

  for (idx = 0; idx < mem_count; idx++)
    dma_mem[idx] = gst_buffer_peek_memory (src, idx);

  if (!gst_v4l2_allocator_import_dmabuf (pool->vallocator, group, mem_count,
          dma_mem))
    goto import_failed;

  /* Tie the source buffer's lifetime to dest so the dmabufs stay valid. */
  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
      gst_buffer_ref (src), (GDestroyNotify) gst_buffer_unref);

  return GST_FLOW_OK;

not_our_buffer:
  {
    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
    return GST_FLOW_ERROR;
  }
too_many_mems:
  {
    GST_ERROR_OBJECT (pool, "could not map buffer");
    return GST_FLOW_ERROR;
  }
import_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to import dmabuf");
    return GST_FLOW_ERROR;
  }
}
/* Write one buffer to the file descriptor; empty buffers are a no-op. */
static GstFlowReturn
gst_fd_sink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstFdSink *sink = GST_FD_SINK_CAST (bsink);
  guint8 n_mem = gst_buffer_n_memory (buffer);

  /* Nothing to write for a buffer without memory blocks. */
  if (n_mem == 0)
    return GST_FLOW_OK;

  return gst_fd_sink_render_buffers (sink, &buffer, 1, &n_mem, n_mem);
}
/* Hands the decoded output buffer index back to the Android media codec for
 * rendering/release, then chains up to the parent class to return @buffer
 * to the pool. */
static void
gst_mir_buffer_pool_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
#if 1
  GstMemory *mem = NULL;
  int err = 0;
  MediaCodecDelegate delegate;

  /* Get access to the GstMemory stored in the GstBuffer */
  if (gst_buffer_n_memory (buffer) >= 1 &&
      (mem = gst_buffer_peek_memory (buffer, 0)) &&
      gst_is_mir_image_memory (mem)) {
    GST_DEBUG_OBJECT (pool, "It is Mir image memory");
  } else {
    GST_DEBUG_OBJECT (pool, "It is NOT Mir image memory");
    /* FIX: the original code fell through here and passed a NULL or
     * non-Mir GstMemory to gst_mir_image_memory_get_codec(), risking an
     * invalid dereference.  Skip the codec release entirely instead. */
    goto done;
  }

  delegate = gst_mir_image_memory_get_codec (mem);
  if (!delegate) {
    GST_WARNING_OBJECT (pool, "delegate is NULL, rendering will not function");
    goto done;
  }

  GST_DEBUG_OBJECT (pool, "mem: %p", mem);
  GST_DEBUG_OBJECT (pool, "gst_mir_image_memory_get_codec (mem): %p", delegate);
  GST_DEBUG_OBJECT (pool, "gst_mir_image_memory_get_buffer_index (mem): %d",
      gst_mir_image_memory_get_buffer_index (mem));
  GST_DEBUG_OBJECT (pool, "Rendering buffer: %d",
      gst_mir_image_memory_get_buffer_index (mem));
  GST_DEBUG_OBJECT (pool, "Releasing output buffer index: %d",
      gst_mir_image_memory_get_buffer_index (mem));

  /* Render and release the output buffer back to the decoder */
  err = media_codec_release_output_buffer (delegate,
      gst_mir_image_memory_get_buffer_index (mem));
  if (err < 0)
    GST_WARNING_OBJECT (pool,
        "Failed to release output buffer. Rendering will probably be affected (err: %d).",
        err);
#endif

done:
  /* Always chain up so the buffer returns to the pool. */
  GST_BUFFER_POOL_CLASS (parent_class)->release_buffer (pool, buffer);
}
/* GstBufferList foreach callback: appends every memory of *buf to the
 * destination buffer passed via @data, copying metadata from the first
 * buffer only. */
static gboolean
buffer_list_copy_data (GstBuffer ** buf, guint idx, gpointer data)
{
  GstBuffer *dest = data;
  guint n_mems, m;

  /* Metadata (flags, timestamps, metas) comes from the first buffer. */
  if (idx == 0)
    gst_buffer_copy_into (dest, *buf, GST_BUFFER_COPY_METADATA, 0, -1);

  /* gst_buffer_get_memory() refs; the ref is transferred to dest. */
  n_mems = gst_buffer_n_memory (*buf);
  for (m = 0; m < n_mems; ++m)
    gst_buffer_append_memory (dest, gst_buffer_get_memory (*buf, m));

  return TRUE;
}
/* Returns the PlanarYCbCrImage backing aBuffer, or nullptr if the buffer is
 * not single-memory moz-gfx memory or cannot be mapped. */
nsRefPtr<PlanarYCbCrImage> GStreamerReader::GetImageFromBuffer(GstBuffer* aBuffer)
{
  nsRefPtr<PlanarYCbCrImage> image = nullptr;

  if (gst_buffer_n_memory(aBuffer) == 1) {
    GstMemory* mem = gst_buffer_peek_memory(aBuffer, 0);
    if (GST_IS_MOZ_GFX_MEMORY_ALLOCATOR(mem->allocator)) {
      image = moz_gfx_memory_get_image(mem);

      GstVideoFrame frame;
      /* FIX: the map result was previously ignored; on failure the
       * uninitialized frame would have been read (undefined behavior). */
      if (!gst_video_frame_map(&frame, &mVideoInfo, aBuffer, GST_MAP_READ)) {
        return nullptr;
      }
      PlanarYCbCrImage::Data data;
      ImageDataFromVideoFrame(&frame, &data);
      image->SetDataNoCopy(data);
      gst_video_frame_unmap(&frame);
    }
  }
  return image;
}
/* GstVideoGLTextureUploadMeta upload function: binds the EGLImage of every
 * memory in meta->buffer to the caller-provided texture ids, one texture
 * unit per memory.  Fails if any memory is not EGLImage-backed. */
static gboolean
gst_eglimage_to_gl_texture_upload_meta (GstVideoGLTextureUploadMeta * meta,
    guint texture_id[4])
{
  gint i = 0;
  gint n = 0;

  g_return_val_if_fail (meta != NULL, FALSE);
  g_return_val_if_fail (texture_id != NULL, FALSE);

  GST_DEBUG ("Uploading for meta with textures %i,%i,%i,%i", texture_id[0],
      texture_id[1], texture_id[2], texture_id[3]);

  /* One memory per texture/plane. */
  n = gst_buffer_n_memory (meta->buffer);
  for (i = 0; i < n; i++) {
    GstMemory *mem = gst_buffer_peek_memory (meta->buffer, i);
    const GstGLFuncs *gl = NULL;

    if (!gst_is_egl_image_memory (mem)) {
      GST_WARNING ("memory %p does not hold an EGLImage", mem);
      return FALSE;
    }

    gl = GST_GL_CONTEXT (GST_EGL_IMAGE_MEMORY (mem)->context)->gl_vtable;

    /* Bind memory i's EGLImage to texture unit i. */
    gl->ActiveTexture (GL_TEXTURE0 + i);
    gl->BindTexture (GL_TEXTURE_2D, texture_id[i]);
    gl->EGLImageTargetTexture2D (GL_TEXTURE_2D,
        gst_egl_image_memory_get_image (mem));
  }

  /* Keep the buffer alive in the pool until the next upload replaces it,
   * since the texture still references its EGLImages. */
  if (GST_IS_GL_BUFFER_POOL (meta->buffer->pool))
    gst_gl_buffer_pool_replace_last_buffer (GST_GL_BUFFER_POOL (meta->
            buffer->pool), meta->buffer);
  return TRUE;
}
/* Releases the VLC picture attached to @p_buffer (if any) and clears the
 * picture/plane pointers on every plane memory of the buffer. */
void gst_vlc_picture_plane_allocator_release(
    GstVlcPicturePlaneAllocator *p_allocator, GstBuffer *p_buffer )
{
    GstVlcPicturePlane* p_mem =
        (GstVlcPicturePlane*) gst_buffer_peek_memory( p_buffer, 0 );
    guint i_plane;

    VLC_UNUSED( p_allocator );

    /* Nothing to do if no picture was ever attached. */
    if( !p_mem->p_pic )
        return;

    picture_Release( p_mem->p_pic );

    /* Detach the released picture from every plane memory. */
    for( i_plane = 0; i_plane < gst_buffer_n_memory( p_buffer );
            i_plane++ )
    {
        p_mem = (GstVlcPicturePlane*) gst_buffer_peek_memory( p_buffer,
                i_plane );
        p_mem->p_pic = NULL;
        p_mem->p_plane = NULL;
    }
}
/* Checks that @buffer still contains exactly the memories of its V4L2
 * memory group (nothing replaced, resized, or shared) and optionally
 * returns that group via @out_group.  Returns FALSE for buffers whose
 * memory was tampered with. */
static gboolean
gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group)
{
  GstMemory *mem = gst_buffer_peek_memory (buffer, 0);
  gboolean valid = FALSE;

  /* TAG_MEMORY means memory was replaced behind our back: reject before
   * inspecting anything else. */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY))
    goto done;

  /* Exported dmabufs carry their backing V4L2 memory as qdata. */
  if (gst_is_dmabuf_memory (mem))
    mem = gst_mini_object_get_qdata (GST_MINI_OBJECT (mem),
        GST_V4L2_MEMORY_QUARK);

  if (mem && gst_is_v4l2_memory (mem)) {
    GstV4l2Memory *vmem = (GstV4l2Memory *) mem;
    GstV4l2MemoryGroup *group = vmem->group;
    gint i;

    /* The buffer must hold exactly the group's memories, in order. */
    if (group->n_mem != gst_buffer_n_memory (buffer))
      goto done;

    for (i = 0; i < group->n_mem; i++) {
      if (group->mem[i] != gst_buffer_peek_memory (buffer, i))
        goto done;

      /* A non-writable memory is still referenced elsewhere and cannot
       * be safely requeued. */
      if (!gst_memory_is_writable (group->mem[i]))
        goto done;
    }

    valid = TRUE;
    if (out_group)
      *out_group = group;
  }

done:
  return valid;
}
/* Write a whole buffer list to the file descriptor in a single call. */
static GstFlowReturn
gst_fd_sink_render_list (GstBaseSink * bsink, GstBufferList * buffer_list)
{
  GstFdSink *sink = GST_FD_SINK_CAST (bsink);
  GstBuffer **buffers;
  guint8 *mem_nums;
  guint total_mems = 0;
  guint num_buffers, idx;

  num_buffers = gst_buffer_list_length (buffer_list);
  if (num_buffers == 0) {
    GST_LOG_OBJECT (sink, "empty buffer list");
    return GST_FLOW_OK;
  }

  /* Stack-allocated scratch arrays: one entry per buffer in the list. */
  buffers = g_newa (GstBuffer *, num_buffers);
  mem_nums = g_newa (guint8, num_buffers);

  for (idx = 0; idx < num_buffers; ++idx) {
    buffers[idx] = gst_buffer_list_get (buffer_list, idx);
    mem_nums[idx] = gst_buffer_n_memory (buffers[idx]);
    total_mems += mem_nums[idx];
  }

  return gst_fd_sink_render_buffers (sink, buffers, num_buffers,
      mem_nums, total_mems);
}
/* Maps each of the @n memories of @buf for reading into @maps and fills the
 * matching iovec entries.  Returns the total number of bytes described.
 * A memory that fails to map yields an empty iovec so indices stay aligned. */
static gsize
fill_vectors (struct iovec *vecs, GstMapInfo * maps, guint n, GstBuffer * buf)
{
  gsize total = 0;
  guint idx;

  g_assert (gst_buffer_n_memory (buf) == n);

  for (idx = 0; idx < n; ++idx) {
    GstMemory *mem = gst_buffer_peek_memory (buf, idx);

    if (!gst_memory_map (mem, &maps[idx], GST_MAP_READ)) {
      GST_WARNING ("Failed to map memory %p for reading", mem);
      /* Substitute a zero-length vector rather than skipping the slot. */
      vecs[idx].iov_base = (void *) "";
      vecs[idx].iov_len = 0;
    } else {
      vecs[idx].iov_base = maps[idx].data;
      vecs[idx].iov_len = maps[idx].size;
    }
    total += vecs[idx].iov_len;
  }

  return total;
}
/* Tries to import @inbuf's dmabuf memories as a KMS framebuffer without
 * copying.  On success *outbuf wraps the (possibly cached) GstKMSMemory and
 * keeps @inbuf alive via a parent-buffer meta.  Returns FALSE whenever the
 * buffer cannot be imported, in which case the caller falls back to a copy
 * path. */
static gboolean
gst_kms_sink_import_dmabuf (GstKMSSink * self, GstBuffer * inbuf,
    GstBuffer ** outbuf)
{
  gint prime_fds[GST_VIDEO_MAX_PLANES] = { 0, };
  GstVideoMeta *meta;
  guint i, n_mem, n_planes;
  GstKMSMemory *kmsmem;
  guint mems_idx[GST_VIDEO_MAX_PLANES];
  gsize mems_skip[GST_VIDEO_MAX_PLANES];
  GstMemory *mems[GST_VIDEO_MAX_PLANES];

  if (!self->has_prime_import)
    return FALSE;

  /* This will eliminate most non-dmabuf out there */
  if (!gst_is_dmabuf_memory (gst_buffer_peek_memory (inbuf, 0)))
    return FALSE;

  n_planes = GST_VIDEO_INFO_N_PLANES (&self->vinfo);
  n_mem = gst_buffer_n_memory (inbuf);
  meta = gst_buffer_get_video_meta (inbuf);

  GST_TRACE_OBJECT (self, "Found a dmabuf with %u planes and %u memories",
      n_planes, n_mem);

  /* We cannot have multiple dmabuf per plane */
  if (n_mem > n_planes)
    return FALSE;

  /* Update video info based on video meta */
  if (meta) {
    GST_VIDEO_INFO_WIDTH (&self->vinfo) = meta->width;
    GST_VIDEO_INFO_HEIGHT (&self->vinfo) = meta->height;

    for (i = 0; i < meta->n_planes; i++) {
      GST_VIDEO_INFO_PLANE_OFFSET (&self->vinfo, i) = meta->offset[i];
      GST_VIDEO_INFO_PLANE_STRIDE (&self->vinfo, i) = meta->stride[i];
    }
  }

  /* Find and validate all memories: locate which memory (and at what skip
   * offset) backs each plane's data. */
  for (i = 0; i < n_planes; i++) {
    guint length;

    if (!gst_buffer_find_memory (inbuf,
            GST_VIDEO_INFO_PLANE_OFFSET (&self->vinfo, i), 1,
            &mems_idx[i], &length, &mems_skip[i]))
      return FALSE;

    mems[i] = gst_buffer_peek_memory (inbuf, mems_idx[i]);

    /* And all memory found must be dmabuf */
    if (!gst_is_dmabuf_memory (mems[i]))
      return FALSE;
  }

  /* Reuse a previously imported KMS memory cached on the first dmabuf. */
  kmsmem = (GstKMSMemory *) get_cached_kmsmem (mems[0]);
  if (kmsmem) {
    GST_LOG_OBJECT (self, "found KMS mem %p in DMABuf mem %p with fb id = %d",
        kmsmem, mems[0], kmsmem->fb_id);
    goto wrap_mem;
  }

  for (i = 0; i < n_planes; i++)
    prime_fds[i] = gst_dmabuf_memory_get_fd (mems[i]);

  GST_LOG_OBJECT (self, "found these prime ids: %d, %d, %d, %d", prime_fds[0],
      prime_fds[1], prime_fds[2], prime_fds[3]);

  kmsmem = gst_kms_allocator_dmabuf_import (self->allocator, prime_fds,
      n_planes, mems_skip, &self->vinfo);
  if (!kmsmem)
    return FALSE;

  GST_LOG_OBJECT (self, "setting KMS mem %p to DMABuf mem %p with fb id = %d",
      kmsmem, mems[0], kmsmem->fb_id);
  /* Cache the imported memory on the dmabuf for subsequent frames. */
  set_cached_kmsmem (mems[0], GST_MEMORY_CAST (kmsmem));

wrap_mem:
  *outbuf = gst_buffer_new ();
  if (!*outbuf)
    return FALSE;
  gst_buffer_append_memory (*outbuf, gst_memory_ref (GST_MEMORY_CAST (kmsmem)));
  /* Keep the source dmabuf buffer alive as long as outbuf exists. */
  gst_buffer_add_parent_buffer_meta (*outbuf, inbuf);

  return TRUE;
}
/* Acquire a buffer from the pool and, in dmabuf mode, make sure the buffer
 * carries the GstFdMemory that matches the surface in the acquire params
 * (pool FIFO order and driver surface order are unrelated). */
static GstFlowReturn
gst_vaapi_video_buffer_pool_acquire_buffer (GstBufferPool * pool,
    GstBuffer ** out_buffer_ptr, GstBufferPoolAcquireParams * params)
{
  GstVaapiVideoBufferPoolPrivate *const priv =
      GST_VAAPI_VIDEO_BUFFER_POOL (pool)->priv;
  GstVaapiVideoBufferPoolAcquireParams *const priv_params =
      (GstVaapiVideoBufferPoolAcquireParams *) params;
  GstFlowReturn ret;
  GstBuffer *buffer;
  GstMemory *mem;
  GstVaapiVideoMeta *meta;
  GstVaapiSurface *surface;
  GstVaapiBufferProxy *dmabuf_proxy;

  ret =
      GST_BUFFER_POOL_CLASS
      (gst_vaapi_video_buffer_pool_parent_class)->acquire_buffer (pool, &buffer,
      params);

  /* Non-dmabuf mode, or no surface proxy to match against: pass through. */
  if (!priv->use_dmabuf_memory || !params || !priv_params->proxy
      || ret != GST_FLOW_OK) {
    *out_buffer_ptr = buffer;
    return ret;
  }

  /* The point of the following dance is to attach the right GstMemory to the
   * current acquired buffer. Indeed this buffer can contain any of the
   * GstFdmemory since this buffer have been popped out from the buffer pool's
   * FIFO. So there is no guarantee that this matches the current surface. The
   * va decoder driver might not even use a FIFO. So there is no way to guess
   * on the ordering. In short acquire_current_buffer on the va driver and on
   * the buffer pool return none matching data. So we have to manually attach
   * the right GstFdMemory to the acquired GstBuffer. The right GstMemory is
   * the one associated with the current surface. */
  g_assert (gst_buffer_n_memory (buffer) == 1);

  /* Find the cached memory associated with the given surface. */
  surface = GST_VAAPI_SURFACE_PROXY_SURFACE (priv_params->proxy);
  dmabuf_proxy = gst_vaapi_surface_peek_buffer_proxy (surface);
  if (dmabuf_proxy) {
    mem = gst_vaapi_buffer_proxy_peek_mem (dmabuf_proxy);
    /* mem == NULL below means "buffer already holds the right memory". */
    if (mem == gst_buffer_peek_memory (buffer, 0))
      mem = NULL;
    else
      mem = gst_memory_ref (mem);
  } else {
    /* The given surface has not been exported yet: export it through a new
     * dmabuf memory bound to this buffer's vaapi meta. */
    meta = gst_buffer_get_vaapi_video_meta (buffer);
    if (gst_vaapi_video_meta_get_surface_proxy (meta))
      gst_vaapi_video_meta_set_surface_proxy (meta, priv_params->proxy);

    mem = gst_vaapi_dmabuf_memory_new (priv->allocator,
        gst_buffer_get_vaapi_video_meta (buffer));
  }

  /* Attach the GstFdMemory to the output buffer. */
  if (mem) {
    GST_DEBUG_OBJECT (pool, "assigning memory %p to acquired buffer %p", mem,
        buffer);
    gst_buffer_replace_memory (buffer, 0, mem);
    /* The replacement is intentional, not tampering: clear TAG_MEMORY. */
    gst_buffer_unset_flags (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
  }

  *out_buffer_ptr = buffer;
  return GST_FLOW_OK;
}
/* called with the object lock held */
/* Collects one memory from each sink pad's current buffer into an
 * intermediate buffer, runs it through the view converter, and attaches
 * per-view video metas to the converted output(s).  Returns FALSE when not
 * all expected views are present or any conversion step fails. */
static gboolean
gst_gl_stereo_mix_process_frames (GstGLStereoMix * mixer)
{
  GstVideoAggregator *vagg = GST_VIDEO_AGGREGATOR (mixer);
  GstBuffer *converted_buffer, *inbuf;
  GstVideoInfo *out_info = &vagg->info;
#ifndef G_DISABLE_ASSERT
  gint n;
#endif
  gint v, views;
  gint valid_views = 0;
  GList *walk;

  /* Build an intermediate buffer holding one memory per input view. */
  inbuf = gst_buffer_new ();
  walk = GST_ELEMENT (mixer)->sinkpads;
  while (walk) {
    GstGLStereoMixPad *pad = walk->data;
    GstMemory *in_mem;

    GST_LOG_OBJECT (mixer, "Handling frame %d", valid_views);
    if (!pad || !pad->current_buffer) {
      GST_DEBUG ("skipping texture, null frame");
      walk = g_list_next (walk);
      continue;
    }

    in_mem = gst_buffer_get_memory (pad->current_buffer, 0);

    GST_LOG_OBJECT (mixer,
        "Appending memory %" GST_PTR_FORMAT " to intermediate buffer", in_mem);
    /* Appending the memory to a 2nd buffer locks it
     * exclusive a 2nd time, which will mark it for
     * copy-on-write. The ref will keep the memory
     * alive but we add a parent_buffer_meta to also
     * prevent the input buffer from returning to any buffer
     * pool it might belong to */
    gst_buffer_append_memory (inbuf, in_mem);
    /* Use parent buffer meta to keep input buffer alive */
    gst_buffer_add_parent_buffer_meta (inbuf, pad->current_buffer);

    valid_views++;
    walk = g_list_next (walk);
  }

  if (mixer->mix_info.views != valid_views) {
    GST_WARNING_OBJECT (mixer, "Not enough input views to process");
    return FALSE;
  }

  /* Separated multiview output carries each view in its own set of planes. */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (out_info) ==
      GST_VIDEO_MULTIVIEW_MODE_SEPARATED)
    views = out_info->views;
  else
    views = 1;

  /* submit_input_buffer takes ownership of inbuf. */
  if (gst_gl_view_convert_submit_input_buffer (mixer->viewconvert,
          FALSE, inbuf) != GST_FLOW_OK)
    return FALSE;

  /* Clear any existing buffers, just in case */
  gst_buffer_replace (&mixer->primary_out, NULL);
  gst_buffer_replace (&mixer->auxilliary_out, NULL);

  if (gst_gl_view_convert_get_output (mixer->viewconvert,
          &mixer->primary_out) != GST_FLOW_OK)
    return FALSE;

  /* Frame-by-frame mode produces a second (auxiliary) output buffer. */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (out_info) ==
      GST_VIDEO_MULTIVIEW_MODE_FRAME_BY_FRAME) {
    if (gst_gl_view_convert_get_output (mixer->viewconvert,
            &mixer->auxilliary_out) != GST_FLOW_OK)
      return FALSE;
  }

  if (mixer->primary_out == NULL)
    return FALSE;

  converted_buffer = mixer->primary_out;

#ifndef G_DISABLE_ASSERT
  /* Sanity: the converter must produce one memory per plane per view. */
  n = gst_buffer_n_memory (converted_buffer);
  g_assert (n == GST_VIDEO_INFO_N_PLANES (out_info) * views);
#endif

  /* Attach a video meta for every view on both output buffers. */
  for (v = 0; v < views; v++) {
    gst_buffer_add_video_meta_full (converted_buffer, v,
        GST_VIDEO_INFO_FORMAT (out_info),
        GST_VIDEO_INFO_WIDTH (out_info),
        GST_VIDEO_INFO_HEIGHT (out_info),
        GST_VIDEO_INFO_N_PLANES (out_info), out_info->offset, out_info->stride);
    if (mixer->auxilliary_out) {
      gst_buffer_add_video_meta_full (mixer->auxilliary_out, v,
          GST_VIDEO_INFO_FORMAT (out_info),
          GST_VIDEO_INFO_WIDTH (out_info),
          GST_VIDEO_INFO_HEIGHT (out_info),
          GST_VIDEO_INFO_N_PLANES (out_info), out_info->offset,
          out_info->stride);
    }
  }

  return TRUE;
}
/* Sends one buffer over the shared-memory pipe.  If the buffer is not a
 * single memory from our own shm allocator, the data is copied into a
 * freshly allocated shm memory first.  Blocks (cond-waiting under the
 * object lock) until a client connects / space is available, honouring
 * unlock/preroll requests throughout. */
static GstFlowReturn
gst_shm_sink_render (GstBaseSink * bsink, GstBuffer * buf)
{
  GstShmSink *self = GST_SHM_SINK (bsink);
  int rv = 0;
  GstMapInfo map;
  gboolean need_new_memory = FALSE;
  GstFlowReturn ret = GST_FLOW_OK;
  GstMemory *memory = NULL;
  GstBuffer *sendbuf = NULL;
  gsize written_bytes;

  GST_OBJECT_LOCK (self);
  /* Optionally wait for at least one client before rendering. */
  while (self->wait_for_connection && !self->clients) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock) {
      GST_OBJECT_UNLOCK (self);
      ret = gst_base_sink_wait_preroll (bsink);
      if (ret == GST_FLOW_OK)
        GST_OBJECT_LOCK (self);
      else
        return ret;
    }
  }

  /* Wait until the pipe can accept this buffer's timestamp. */
  while (!gst_shm_sink_can_render (self, GST_BUFFER_TIMESTAMP (buf))) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock) {
      GST_OBJECT_UNLOCK (self);
      ret = gst_base_sink_wait_preroll (bsink);
      if (ret == GST_FLOW_OK)
        GST_OBJECT_LOCK (self);
      else
        return ret;
    }
  }

  /* Decide whether we can send the buffer's memory directly (single memory
   * from our own allocator) or must copy into fresh shm memory. */
  if (gst_buffer_n_memory (buf) > 1) {
    GST_LOG_OBJECT (self, "Buffer %p has %d GstMemory, we only support a single"
        " one, need to do a memcpy", buf, gst_buffer_n_memory (buf));
    need_new_memory = TRUE;
  } else {
    memory = gst_buffer_peek_memory (buf, 0);
    if (memory->allocator != GST_ALLOCATOR (self->allocator)) {
      need_new_memory = TRUE;
      GST_LOG_OBJECT (self, "Memory in buffer %p was not allocated by "
          "%" GST_PTR_FORMAT ", will memcpy", buf, memory->allocator);
    }
  }

  if (need_new_memory) {
    if (gst_buffer_get_size (buf) > sp_writer_get_max_buf_size (self->pipe)) {
      gsize area_size = sp_writer_get_max_buf_size (self->pipe);
      GST_ELEMENT_ERROR (self, RESOURCE, NO_SPACE_LEFT, (NULL),
          ("Shared memory area of size %" G_GSIZE_FORMAT " is smaller than"
              "buffer of size %" G_GSIZE_FORMAT, area_size,
              gst_buffer_get_size (buf)));
      goto error;
    }

    /* Block until shm space for the copy becomes available. */
    while ((memory =
            gst_shm_sink_allocator_alloc_locked (self->allocator,
                gst_buffer_get_size (buf), &self->params)) == NULL) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        GST_OBJECT_UNLOCK (self);
        ret = gst_base_sink_wait_preroll (bsink);
        if (ret == GST_FLOW_OK)
          GST_OBJECT_LOCK (self);
        else
          return ret;
      }
    }

    /* Re-check the connection requirement now that we hold shm memory. */
    while (self->wait_for_connection && !self->clients) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        GST_OBJECT_UNLOCK (self);
        ret = gst_base_sink_wait_preroll (bsink);
        if (ret == GST_FLOW_OK) {
          GST_OBJECT_LOCK (self);
        } else {
          gst_memory_unref (memory);
          return ret;
        }
      }
    }

    /* NOTE(review): from here on, `goto error` paths do not unref the
     * freshly allocated `memory` — looks like a leak; verify against the
     * allocator's ownership rules. */
    if (!gst_memory_map (memory, &map, GST_MAP_WRITE)) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
          ("Failed to map memory"));
      goto error;
    }

    GST_DEBUG_OBJECT (self,
        "Copying %" G_GSIZE_FORMAT " bytes into map of size %" G_GSIZE_FORMAT
        " bytes.", gst_buffer_get_size (buf), map.size);
    written_bytes = gst_buffer_extract (buf, 0, map.data, map.size);
    GST_DEBUG_OBJECT (self, "Copied %" G_GSIZE_FORMAT " bytes.",
        written_bytes);
    gst_memory_unmap (memory, &map);

    /* Build the buffer actually sent: copied metadata + the shm memory. */
    sendbuf = gst_buffer_new ();
    if (!gst_buffer_copy_into (sendbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1)) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
          ("Failed to copy data into send buffer"));
      gst_buffer_unref (sendbuf);
      goto error;
    }
    gst_buffer_append_memory (sendbuf, memory);
  } else {
    /* Zero-copy path: the buffer's own shm memory is sent as-is. */
    sendbuf = gst_buffer_ref (buf);
  }

  if (!gst_buffer_map (sendbuf, &map, GST_MAP_READ)) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
        ("Failed to map data into send buffer"));
    goto error;
  }

  /* Make the memory readonly as of now as we've sent it to the other side
   * We know it's not mapped for writing anywhere as we just mapped it for
   * reading */
  rv = sp_writer_send_buf (self->pipe, (char *) map.data, map.size, sendbuf);
  if (rv == -1) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
        ("Failed to send data over SHM"));
    gst_buffer_unmap (sendbuf, &map);
    goto error;
  }

  gst_buffer_unmap (sendbuf, &map);

  GST_OBJECT_UNLOCK (self);

  /* rv == 0 means no client received it; sendbuf is ours to drop.  When a
   * client did receive it, the pipe keeps the reference until acked. */
  if (rv == 0) {
    GST_DEBUG_OBJECT (self, "No clients connected, unreffing buffer");
    gst_buffer_unref (sendbuf);
  }

  return ret;

error:
  GST_OBJECT_UNLOCK (self);
  return GST_FLOW_ERROR;
}
/**
 * gst_gl_upload_perform_with_buffer:
 * @upload: a #GstGLUpload
 * @buffer: a #GstBuffer
 * @tex_id: resulting texture
 *
 * Uploads @buffer to the texture given by @tex_id. @tex_id is valid
 * until gst_gl_upload_release_buffer() is called.
 *
 * Tries the fastest path first and falls back in order:
 * GstGLMemory (zero-copy / per-plane upload), then
 * GstVideoGLTextureUploadMeta, then a raw-data upload via video-frame map.
 *
 * Returns: whether the upload was successful
 */
gboolean
gst_gl_upload_perform_with_buffer (GstGLUpload * upload, GstBuffer * buffer,
    guint * tex_id)
{
  GstMemory *mem;
  GstVideoGLTextureUploadMeta *gl_tex_upload_meta;
  guint texture_ids[] = { 0, 0, 0, 0 };
  gint i;
  gboolean ret;

  g_return_val_if_fail (upload != NULL, FALSE);
  g_return_val_if_fail (buffer != NULL, FALSE);
  g_return_val_if_fail (tex_id != NULL, FALSE);
  g_return_val_if_fail (gst_buffer_n_memory (buffer) > 0, FALSE);

  /* Release any previously uploaded buffer before starting a new one. */
  gst_gl_upload_release_buffer (upload);

  /* GstGLMemory */
  mem = gst_buffer_peek_memory (buffer, 0);

  if (gst_is_gl_memory (mem)) {
    /* RGBA GL memory already is the texture; a GL-map round-trip makes
     * sure any pending upload is flushed. */
    if (GST_VIDEO_INFO_FORMAT (&upload->in_info) == GST_VIDEO_FORMAT_RGBA) {
      GstMapInfo map_info;

      gst_memory_map (mem, &map_info, GST_MAP_READ | GST_MAP_GL);
      gst_memory_unmap (mem, &map_info);

      *tex_id = ((GstGLMemory *) mem)->tex_id;
      return TRUE;
    }

    /* Non-RGBA: hand each plane's GL memory to the upload machinery. */
    GST_LOG_OBJECT (upload, "Attempting upload with GstGLMemory");
    for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&upload->in_info); i++) {
      upload->in_tex[i] = (GstGLMemory *) gst_buffer_peek_memory (buffer, i);
    }

    ret = _upload_memory (upload);

    *tex_id = upload->out_tex->tex_id;
    for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&upload->in_info); i++) {
      upload->in_tex[i] = NULL;
    }

    return ret;
  }
#if GST_GL_HAVE_PLATFORM_EGL
  /* EGLImage path: texture dimensions come from the image itself. */
  if (!upload->priv->tex_id && gst_is_egl_image_memory (mem))
    gst_gl_context_gen_texture (upload->context, &upload->priv->tex_id,
        GST_VIDEO_FORMAT_RGBA, 0, 0);
#endif

  /* Lazily create the destination texture for the meta/raw paths. */
  if (!upload->priv->tex_id)
    gst_gl_context_gen_texture (upload->context, &upload->priv->tex_id,
        GST_VIDEO_FORMAT_RGBA, GST_VIDEO_INFO_WIDTH (&upload->in_info),
        GST_VIDEO_INFO_HEIGHT (&upload->in_info));

  /* GstVideoGLTextureUploadMeta */
  gl_tex_upload_meta = gst_buffer_get_video_gl_texture_upload_meta (buffer);
  if (gl_tex_upload_meta) {
    GST_LOG_OBJECT (upload, "Attempting upload with "
        "GstVideoGLTextureUploadMeta");
    texture_ids[0] = upload->priv->tex_id;

    if (!gst_gl_upload_perform_with_gl_texture_upload_meta (upload,
            gl_tex_upload_meta, texture_ids)) {
      /* Fall through to the raw-data path on failure. */
      GST_DEBUG_OBJECT (upload, "Upload with GstVideoGLTextureUploadMeta "
          "failed");
    } else {
      upload->priv->mapped = FALSE;
      *tex_id = upload->priv->tex_id;
      return TRUE;
    }
  }

  GST_LOG_OBJECT (upload, "Attempting upload with raw data");
  /* GstVideoMeta map */
  if (!gst_video_frame_map (&upload->priv->frame, &upload->in_info,
          buffer, GST_MAP_READ)) {
    GST_ERROR_OBJECT (upload, "Failed to map memory");
    return FALSE;
  }
  upload->priv->mapped = TRUE;

  /* update the video info from the one updated by frame_map using video meta */
  gst_gl_upload_set_format (upload, &upload->priv->frame.info);

  if (!gst_gl_upload_perform_with_data (upload, tex_id,
          upload->priv->frame.data)) {
    return FALSE;
  }

  return TRUE;
}
/* Sends one buffer to every configured UDP client as a single scatter-gather
 * message (one iovec per GstMemory).  Per-client statistics are updated;
 * send errors are posted as warnings so streaming continues, except for
 * cancellation which aborts with FLUSHING. */
static GstFlowReturn
gst_multiudpsink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstMultiUDPSink *sink;
  GList *clients;
  GOutputVector *vec;
  GstMapInfo *map;
  guint n_mem, i;
  gsize size;
  GstMemory *mem;
  gint num, no_clients;
  GError *err = NULL;

  sink = GST_MULTIUDPSINK (bsink);

  n_mem = gst_buffer_n_memory (buffer);
  if (n_mem == 0)
    goto no_data;

  /* allocated on the stack, the max number of memory blocks is limited so this
   * should not cause stack overflows */
  vec = sink->vec;
  map = sink->map;

  /* Map every memory once; the mappings are reused for all clients and
   * released after the send loop (or in the flushing path). */
  size = 0;
  for (i = 0; i < n_mem; i++) {
    mem = gst_buffer_get_memory (buffer, i);
    gst_memory_map (mem, &map[i], GST_MAP_READ);

    vec[i].buffer = map[i].data;
    vec[i].size = map[i].size;

    size += map[i].size;
  }

  sink->bytes_to_serve += size;

  /* grab lock while iterating and sending to clients, this should be
   * fast as UDP never blocks */
  g_mutex_lock (&sink->client_lock);
  GST_LOG_OBJECT (bsink, "about to send %" G_GSIZE_FORMAT " bytes in %u blocks",
      size, n_mem);

  no_clients = 0;
  num = 0;
  for (clients = sink->clients; clients; clients = g_list_next (clients)) {
    GstUDPClient *client;
    GSocket *socket;
    GSocketFamily family;
    gint count;

    client = (GstUDPClient *) clients->data;
    no_clients++;
    GST_LOG_OBJECT (sink, "sending %" G_GSIZE_FORMAT " bytes to client %p",
        size, client);

    family = g_socket_address_get_family (G_SOCKET_ADDRESS (client->addr));
    /* Select socket to send from for this address */
    if (family == G_SOCKET_FAMILY_IPV6 || !sink->used_socket)
      socket = sink->used_socket_v6;
    else
      socket = sink->used_socket;

    /* send-duplicates sends once per time the client was added. */
    count = sink->send_duplicates ? client->refcount : 1;

    while (count--) {
      gssize ret;

      ret = g_socket_send_message (socket, client->addr, vec, n_mem,
          NULL, 0, 0, sink->cancellable, &err);

      if (G_UNLIKELY (ret < 0)) {
        if (g_error_matches (err, G_IO_ERROR, G_IO_ERROR_CANCELLED))
          goto flushing;

        /* we continue after posting a warning, next packets might be ok
         * again */
        if (size > UDP_MAX_SIZE) {
          GST_ELEMENT_WARNING (sink, RESOURCE, WRITE,
              ("Attempting to send a UDP packet larger than maximum size "
                  "(%" G_GSIZE_FORMAT " > %d)", size, UDP_MAX_SIZE),
              ("Reason: %s", err ? err->message : "unknown reason"));
        } else {
          GST_ELEMENT_WARNING (sink, RESOURCE, WRITE,
              ("Error sending UDP packet"), ("Reason: %s",
                  err ? err->message : "unknown reason"));
        }
        g_clear_error (&err);
      } else {
        num++;
        client->bytes_sent += ret;
        client->packets_sent++;
        sink->bytes_served += ret;
      }
    }
  }
  g_mutex_unlock (&sink->client_lock);

  /* unmap all memory again */
  for (i = 0; i < n_mem; i++) {
    gst_memory_unmap (map[i].memory, &map[i]);
    gst_memory_unref (map[i].memory);
  }

  GST_LOG_OBJECT (sink, "sent %" G_GSIZE_FORMAT " bytes to %d (of %d) clients",
      size, num, no_clients);

  return GST_FLOW_OK;

no_data:
  {
    return GST_FLOW_OK;
  }
flushing:
  {
    GST_DEBUG ("we are flushing");
    g_mutex_unlock (&sink->client_lock);
    g_clear_error (&err);

    /* unmap all memory */
    for (i = 0; i < n_mem; i++) {
      gst_memory_unmap (map[i].memory, &map[i]);
      gst_memory_unref (map[i].memory);
    }

    return GST_FLOW_FLUSHING;
  }
}
/* Push one buffer into the shmdata shared-memory area.
 *
 * If the buffer is a single GstMemory that was allocated by our own
 * allocator it can be passed through zero-copy; otherwise the payload is
 * memcpy'd into freshly allocated shared memory.  After notifying clients
 * the write access is released and re-acquired, which blocks until a
 * reader has taken the data.
 *
 * Called with no locks held; takes the object lock for the duration.
 */
static GstFlowReturn
gst_shmdata_sink_render (GstBaseSink * bsink, GstBuffer * buf)
{
  GstShmdataSink *self = GST_SHMDATA_SINK (bsink);
  int rv = 0;                   /* NOTE(review): set but never read — looks like leftover
                                 * from the shmsink code this was derived from */
  GstMapInfo map;
  gboolean need_new_memory = FALSE;
  GstFlowReturn ret = GST_FLOW_OK;
  GstMemory *memory = NULL;
  GstBuffer *sendbuf = NULL;

  GST_OBJECT_LOCK (self);
  /* Zero-copy only works with exactly one memory block from our allocator */
  if (gst_buffer_n_memory (buf) > 1) {
    GST_LOG_OBJECT (self, "Buffer %p has %d GstMemory, we only support a single"
        " one, need to do a memcpy", buf, gst_buffer_n_memory (buf));
    need_new_memory = TRUE;
  } else {
    memory = gst_buffer_peek_memory (buf, 0);

    if (memory->allocator != GST_ALLOCATOR (self->allocator)) {
      need_new_memory = TRUE;
      GST_LOG_OBJECT (self, "Memory in buffer %p was not allocated by "
          "%" GST_PTR_FORMAT ", will memcpy", buf, memory->allocator);
    }
  }

  if (need_new_memory) {
    /* refuse buffers that can never fit into the shm area */
    if (gst_buffer_get_size (buf) > shmdata_get_shmmax(NULL)) {
      gsize area_size = shmdata_get_shmmax(NULL);
      GST_OBJECT_UNLOCK (self);
      GST_ELEMENT_ERROR (self, RESOURCE, NO_SPACE_LEFT,
          ("Shared memory area is too small"),
          ("Shared memory area of size %" G_GSIZE_FORMAT " is smaller than"
              "buffer of size %" G_GSIZE_FORMAT,
              area_size, gst_buffer_get_size (buf)));
      return GST_FLOW_ERROR;
    }

    /* wait (on the object lock's condition) until shm space frees up,
     * bailing out if we are unlocked/flushing in the meantime */
    while ((memory =
            gst_shmdata_sink_allocator_alloc_locked (self->allocator,
                gst_buffer_get_size (buf), &self->params)) == NULL) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock)
        goto flushing;
    }

    /* copy the payload into the shm memory, keep the metadata */
    gst_memory_map (memory, &map, GST_MAP_WRITE);
    gst_buffer_extract (buf, 0, map.data, map.size);
    gst_memory_unmap (memory, &map);

    sendbuf = gst_buffer_new ();
    gst_buffer_copy_into (sendbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1);
    gst_buffer_append_memory (sendbuf, memory);
  } else {
    sendbuf = gst_buffer_ref (buf);
  }

  gst_buffer_map (sendbuf, &map, GST_MAP_READ);
  /* Make the memory readonly as of now as we've sent it to the other side
   * We know it's not mapped for writing anywhere as we just mapped it for
   * reading */
  shmdata_notify_clients(self->access, map.size);
  self->bytes_since_last_request += map.size;
  shmdata_release_one_write_access(self->access);
  // wait for client to read and take the write lock
  self->access = shmdata_get_one_write_access(self->shmwriter);
  gst_buffer_unmap (sendbuf, &map);
  GST_OBJECT_UNLOCK (self);
  /* NOTE(review): this debug message and unref run unconditionally, unlike
   * shmsink where they are guarded by "no clients" — confirm this is the
   * intended lifetime (the shm copy has already been handed off above) */
  GST_DEBUG_OBJECT (self, "No clients connected, unreffing buffer");
  gst_buffer_unref (sendbuf);
  /* If we allocated our own memory, then unmap it */
  return ret;

flushing:
  GST_OBJECT_UNLOCK (self);
  return GST_FLOW_FLUSHING;
}
/* Allocate (wrap) the next OMX port buffer as a GstBuffer.
 *
 * Only valid while the pool is in its allocating phase: buffers are taken
 * from pool->port->buffers in order via pool->current_buffer_index.
 *
 * Two modes:
 *  - other_pool set: reuse the buffer already drawn from the downstream
 *    pool, detach it from that pool and mark its memories no-share.
 *  - otherwise: wrap the OMX buffer memory directly and compute per-plane
 *    offsets/strides from the OMX port definition (nStride/nSliceHeight),
 *    deciding whether downstream will need a copy.
 */
static GstFlowReturn
gst_omx_buffer_pool_alloc_buffer (GstBufferPool * bpool,
    GstBuffer ** buffer, GstBufferPoolAcquireParams * params)
{
  GstOMXBufferPool *pool = GST_OMX_BUFFER_POOL (bpool);
  GstBuffer *buf;
  GstOMXBuffer *omx_buf;

  g_return_val_if_fail (pool->allocating, GST_FLOW_ERROR);

  omx_buf = g_ptr_array_index (pool->port->buffers, pool->current_buffer_index);
  g_return_val_if_fail (omx_buf != NULL, GST_FLOW_ERROR);

  if (pool->other_pool) {
    guint i, n;

    buf = g_ptr_array_index (pool->buffers, pool->current_buffer_index);
    g_assert (pool->other_pool == buf->pool);
    /* detach the buffer from the downstream pool so it is returned to us,
     * not to other_pool, on release */
    gst_object_replace ((GstObject **) & buf->pool, NULL);

    n = gst_buffer_n_memory (buf);
    for (i = 0; i < n; i++) {
      GstMemory *mem = gst_buffer_peek_memory (buf, i);

      /* FIXME: We don't allow sharing because we need to know
       * when the memory becomes unused and can only then put
       * it back to the pool. Which is done in the pool's release
       * function */
      GST_MINI_OBJECT_FLAG_SET (mem, GST_MEMORY_FLAG_NO_SHARE);
    }

    if (pool->add_videometa) {
      GstVideoMeta *meta;

      meta = gst_buffer_get_video_meta (buf);
      if (!meta) {
        gst_buffer_add_video_meta (buf, GST_VIDEO_FRAME_FLAG_NONE,
            GST_VIDEO_INFO_FORMAT (&pool->video_info),
            GST_VIDEO_INFO_WIDTH (&pool->video_info),
            GST_VIDEO_INFO_HEIGHT (&pool->video_info));
      }
    }

    pool->need_copy = FALSE;
  } else {
    GstMemory *mem;
    const guint nstride = pool->port->port_def.format.video.nStride;
    const guint nslice = pool->port->port_def.format.video.nSliceHeight;
    gsize offset[GST_VIDEO_MAX_PLANES] = { 0, };
    gint stride[GST_VIDEO_MAX_PLANES] = { nstride, 0, };

    mem = gst_omx_memory_allocator_alloc (pool->allocator, 0, omx_buf);
    buf = gst_buffer_new ();
    gst_buffer_append_memory (buf, mem);
    g_ptr_array_add (pool->buffers, buf);

    /* compute per-plane layout from the OMX stride/slice-height; packed
     * single-plane formats need nothing beyond plane 0 */
    switch (GST_VIDEO_INFO_FORMAT (&pool->video_info)) {
      case GST_VIDEO_FORMAT_ABGR:
      case GST_VIDEO_FORMAT_ARGB:
      case GST_VIDEO_FORMAT_RGB16:
      case GST_VIDEO_FORMAT_BGR16:
      case GST_VIDEO_FORMAT_YUY2:
      case GST_VIDEO_FORMAT_UYVY:
      case GST_VIDEO_FORMAT_YVYU:
      case GST_VIDEO_FORMAT_GRAY8:
        break;
      case GST_VIDEO_FORMAT_I420:
        stride[1] = nstride / 2;
        offset[1] = offset[0] + stride[0] * nslice;
        stride[2] = nstride / 2;
        offset[2] = offset[1] + (stride[1] * nslice / 2);
        break;
      case GST_VIDEO_FORMAT_NV12:
      case GST_VIDEO_FORMAT_NV16:
        stride[1] = nstride;
        offset[1] = offset[0] + stride[0] * nslice;
        break;
      default:
        g_assert_not_reached ();
        break;
    }

    if (pool->add_videometa) {
      pool->need_copy = FALSE;
    } else {
      GstVideoInfo info;
      gboolean need_copy = FALSE;
      gint i;

      /* without videometa, downstream assumes the default layout; a copy
       * is needed whenever the OMX layout differs from it */
      gst_video_info_init (&info);
      gst_video_info_set_format (&info,
          GST_VIDEO_INFO_FORMAT (&pool->video_info),
          GST_VIDEO_INFO_WIDTH (&pool->video_info),
          GST_VIDEO_INFO_HEIGHT (&pool->video_info));

      for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&pool->video_info); i++) {
        if (info.stride[i] != stride[i] || info.offset[i] != offset[i]) {
          need_copy = TRUE;
          break;
        }
      }

      pool->need_copy = need_copy;
    }

    if (pool->need_copy || pool->add_videometa) {
      /* We always add the videometa. It's the job of the user
       * to copy the buffer if pool->need_copy is TRUE */
      gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
          GST_VIDEO_INFO_FORMAT (&pool->video_info),
          GST_VIDEO_INFO_WIDTH (&pool->video_info),
          GST_VIDEO_INFO_HEIGHT (&pool->video_info),
          GST_VIDEO_INFO_N_PLANES (&pool->video_info), offset, stride);
    }
  }

  /* remember which OMX buffer backs this GstBuffer */
  gst_mini_object_set_qdata (GST_MINI_OBJECT_CAST (buf),
      gst_omx_buffer_data_quark, omx_buf, NULL);

  *buffer = buf;

  pool->current_buffer_index++;

  return GST_FLOW_OK;
}
static gboolean gst_imx_phys_meta_transform(GstBuffer *dest, GstMeta *meta, GstBuffer *buffer, GQuark type, gpointer data) { GstImxPhysMemMeta *dmeta, *smeta; smeta = (GstImxPhysMemMeta *)meta; if (GST_META_TRANSFORM_IS_COPY(type)) { GstMetaTransformCopy *copy = data; gboolean do_copy = FALSE; if (!(copy->region)) { GST_LOG("not copying metadata: only a region is being copied (not the entire block)"); } else { guint n_mem_buffer, n_mem_dest; n_mem_buffer = gst_buffer_n_memory(buffer); n_mem_dest = gst_buffer_n_memory(dest); /* only copy if both buffers have 1 identical memory */ if ((n_mem_buffer == n_mem_dest) && (n_mem_dest == 1)) { GstMemory *mem1, *mem2; mem1 = gst_buffer_get_memory(dest, 0); mem2 = gst_buffer_get_memory(buffer, 0); if (mem1 == mem2) { GST_LOG("copying physmem metadata: memory blocks identical"); do_copy = TRUE; } else GST_LOG("not copying physmem metadata: memory blocks not identical"); gst_memory_unref(mem1); gst_memory_unref(mem2); } else GST_LOG("not copying physmem metadata: num memory blocks in source/dest: %u/%u", n_mem_buffer, n_mem_dest); } if (do_copy) { /* only copy if the complete data is copied as well */ dmeta = (GstImxPhysMemMeta *)gst_buffer_add_meta(dest, gst_imx_phys_mem_meta_get_info(), NULL); if (!dmeta) { GST_ERROR("could not add physmem metadata to the dest buffer"); return FALSE; } dmeta->phys_addr = smeta->phys_addr; dmeta->x_padding = smeta->x_padding; dmeta->y_padding = smeta->y_padding; if (smeta->parent) dmeta->parent = gst_buffer_ref(smeta->parent); else dmeta->parent = gst_buffer_ref(buffer); } } return TRUE; }
/* Push one buffer through the shm pipe.
 *
 * Optionally waits for a client connection and for render permission
 * (buffer-time pacing).  Single-memory buffers allocated by our own
 * allocator are sent zero-copy; anything else is memcpy'd into freshly
 * allocated shared memory.  sendbuf keeps a ref on the payload until the
 * shmpipe layer is done with it.
 *
 * Takes the object lock; all cond-waits check self->unlock so a flush can
 * interrupt any of the waiting states.
 */
static GstFlowReturn
gst_shm_sink_render (GstBaseSink * bsink, GstBuffer * buf)
{
  GstShmSink *self = GST_SHM_SINK (bsink);
  int rv = 0;
  GstMapInfo map;
  gboolean need_new_memory = FALSE;
  GstFlowReturn ret = GST_FLOW_OK;
  GstMemory *memory = NULL;
  GstBuffer *sendbuf = NULL;

  GST_OBJECT_LOCK (self);
  /* optionally block until at least one client is connected */
  while (self->wait_for_connection && !self->clients) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock)
      goto flushing;
  }

  /* block until this buffer's timestamp may be rendered */
  while (!gst_shm_sink_can_render (self, GST_BUFFER_TIMESTAMP (buf))) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock)
      goto flushing;
  }

  /* Zero-copy requires exactly one memory block from our own allocator */
  if (gst_buffer_n_memory (buf) > 1) {
    GST_LOG_OBJECT (self, "Buffer %p has %d GstMemory, we only support a single"
        " one, need to do a memcpy", buf, gst_buffer_n_memory (buf));
    need_new_memory = TRUE;
  } else {
    memory = gst_buffer_peek_memory (buf, 0);

    if (memory->allocator != GST_ALLOCATOR (self->allocator)) {
      need_new_memory = TRUE;
      GST_LOG_OBJECT (self, "Memory in buffer %p was not allocated by "
          "%" GST_PTR_FORMAT ", will memcpy", buf, memory->allocator);
    }
  }

  if (need_new_memory) {
    /* refuse buffers that can never fit into the shm area */
    if (gst_buffer_get_size (buf) > sp_writer_get_max_buf_size (self->pipe)) {
      gsize area_size = sp_writer_get_max_buf_size (self->pipe);
      GST_OBJECT_UNLOCK (self);
      GST_ELEMENT_ERROR (self, RESOURCE, NO_SPACE_LEFT,
          ("Shared memory area is too small"),
          ("Shared memory area of size %" G_GSIZE_FORMAT " is smaller than"
              "buffer of size %" G_GSIZE_FORMAT,
              area_size, gst_buffer_get_size (buf)));
      return GST_FLOW_ERROR;
    }

    /* wait until shm space frees up, bailing out on flush */
    while ((memory =
            gst_shm_sink_allocator_alloc_locked (self->allocator,
                gst_buffer_get_size (buf), &self->params)) == NULL) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock)
        goto flushing;
    }

    /* re-check connection after the allocation wait; on flush here the
     * freshly allocated memory must be released first */
    while (self->wait_for_connection && !self->clients) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        gst_memory_unref (memory);
        GST_OBJECT_UNLOCK (self);
        return GST_FLOW_FLUSHING;
      }
    }

    /* copy payload into the shm memory, keep the metadata */
    gst_memory_map (memory, &map, GST_MAP_WRITE);
    gst_buffer_extract (buf, 0, map.data, map.size);
    gst_memory_unmap (memory, &map);

    sendbuf = gst_buffer_new ();
    gst_buffer_copy_into (sendbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1);
    gst_buffer_append_memory (sendbuf, memory);
  } else {
    sendbuf = gst_buffer_ref (buf);
  }

  gst_buffer_map (sendbuf, &map, GST_MAP_READ);
  /* Make the memory readonly as of now as we've sent it to the other side
   * We know it's not mapped for writing anywhere as we just mapped it for
   * reading */
  rv = sp_writer_send_buf (self->pipe, (char *) map.data, map.size, sendbuf);
  gst_buffer_unmap (sendbuf, &map);

  GST_OBJECT_UNLOCK (self);

  if (rv == 0) {
    /* no client took the buffer; drop our ref, nothing was sent */
    GST_DEBUG_OBJECT (self, "No clients connected, unreffing buffer");
    gst_buffer_unref (sendbuf);
  } else if (rv == -1) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Invalid allocated buffer"),
        ("The shmpipe library rejects our buffer, this is a bug"));
    ret = GST_FLOW_ERROR;
  }
  /* If we allocated our own memory, then unmap it */

  return ret;

flushing:
  GST_OBJECT_UNLOCK (self);
  return GST_FLOW_FLUSHING;
}
/* Render one buffer: map every GstMemory of @buffer into a scatter/gather
 * vector and send it as a single datagram to each registered client,
 * optionally once per client refcount when send-duplicates is enabled.
 *
 * Returns GST_FLOW_OK (also for empty buffers) or GST_FLOW_ERROR when a
 * send fails.
 */
static GstFlowReturn
gst_multiudpsink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstMultiUDPSink *sink;
  GList *clients;
  GOutputVector *vec;
  GstMapInfo *map;
  guint n_mem, i;
  gsize size;
  GstMemory *mem;
  gint num, no_clients;
  GError *err = NULL;

  sink = GST_MULTIUDPSINK (bsink);

  n_mem = gst_buffer_n_memory (buffer);
  if (n_mem == 0)
    goto no_data;

  vec = g_new (GOutputVector, n_mem);
  map = g_new (GstMapInfo, n_mem);

  /* map each memory block read-only and build the output vector; the
   * blocks stay mapped (and reffed via map[i].memory) until cleanup */
  size = 0;
  for (i = 0; i < n_mem; i++) {
    mem = gst_buffer_get_memory (buffer, i);
    gst_memory_map (mem, &map[i], GST_MAP_READ);

    if (map[i].size > UDP_MAX_SIZE) {
      GST_WARNING ("Attempting to send a UDP packet larger than maximum "
          "size (%" G_GSIZE_FORMAT " > %d)", map[i].size, UDP_MAX_SIZE);
    }

    vec[i].buffer = map[i].data;
    vec[i].size = map[i].size;

    size += map[i].size;
  }
  sink->bytes_to_serve += size;

  /* grab lock while iterating and sending to clients, this should be
   * fast as UDP never blocks */
  g_mutex_lock (&sink->client_lock);
  GST_LOG_OBJECT (bsink, "about to send %" G_GSIZE_FORMAT " bytes", size);

  no_clients = 0;
  num = 0;
  for (clients = sink->clients; clients; clients = g_list_next (clients)) {
    GstUDPClient *client;
    gint count;

    client = (GstUDPClient *) clients->data;
    no_clients++;
    GST_LOG_OBJECT (sink, "sending %" G_GSIZE_FORMAT " bytes to client %p",
        size, client);

    /* send-duplicates: send once per time this client was added */
    count = sink->send_duplicates ? client->refcount : 1;

    while (count--) {
      gssize ret;

      ret = g_socket_send_message (sink->used_socket, client->addr, vec, n_mem,
          NULL, 0, 0, sink->cancellable, &err);

      if (ret < 0)
        goto send_error;

      num++;
      client->bytes_sent += ret;
      client->packets_sent++;
      sink->bytes_served += ret;
    }
  }
  g_mutex_unlock (&sink->client_lock);

  /* unmap all memory again */
  for (i = 0; i < n_mem; i++) {
    gst_memory_unmap (map[i].memory, &map[i]);
    gst_memory_unref (map[i].memory);
  }
  g_free (vec);
  g_free (map);

  GST_LOG_OBJECT (sink, "sent %" G_GSIZE_FORMAT " bytes to %d (of %d) clients",
      size, num, no_clients);

  return GST_FLOW_OK;

  /* ERRORS */
no_data:
  {
    /* empty buffer: nothing to send, not an error */
    return GST_FLOW_OK;
  }
send_error:
  {
    g_mutex_unlock (&sink->client_lock);
    GST_DEBUG ("got send error %s", err->message);
    g_clear_error (&err);

    /* BUGFIX: the error path used to return without unmapping/unreffing
     * the mapped memory blocks or freeing the vec/map arrays, leaking
     * n_mem mappings plus two heap allocations on every failed send.
     * Mirror the success-path cleanup here. */
    for (i = 0; i < n_mem; i++) {
      gst_memory_unmap (map[i].memory, &map[i]);
      gst_memory_unref (map[i].memory);
    }
    g_free (vec);
    g_free (map);

    return GST_FLOW_ERROR;
  }
}
/* GstVideoMeta map function for Mali EGL image backed buffers.
 *
 * Falls back to the default video-map implementation unless the buffer is
 * a single GstMemory of our EGL image memory type.  Per-plane CPU access
 * is obtained by locking and mapping the Mali EGL image; successful maps
 * are refcounted per plane so repeated READ maps reuse the same pointer.
 * Mixed READ/WRITE sharing is rejected (only multiple READ maps allowed).
 *
 * Returns FALSE on locking conflicts or Mali mapping failure.
 */
static gboolean
eglimage_video_map (GstVideoMeta * meta, guint plane,
    GstMapInfo * info, gpointer * data, gint * stride, GstMapFlags flags)
{
  GstMemory *gmem;
  GstEGLImageMemory *mem;
  GstVideoInfo vinfo;

  if (gst_buffer_n_memory (meta->buffer) != 1)
    return default_map_video (meta, plane, info, data, stride, flags);

  gmem = gst_buffer_peek_memory (meta->buffer, 0);
  if (strcmp (gmem->allocator->mem_type, GST_EGL_IMAGE_MEMORY_NAME) != 0)
    return default_map_video (meta, plane, info, data, stride, flags);

  /* sub-memories share their parent's EGL image state */
  mem = GST_EGL_IMAGE_MEMORY ((gmem->parent ? gmem->parent : gmem));

  g_mutex_lock (&mem->lock);

  /* YV12 stores its chroma planes swapped relative to I420 */
  if (mem->format == GST_VIDEO_FORMAT_YV12) {
    if (plane == 1)
      plane = 2;
    else if (plane == 2)
      plane = 1;
  }

  if (mem->mapped_memory_refcount) {
    /* Only multiple READ maps are allowed */
    if ((mem->mapped_memory_flags & GST_MAP_WRITE)) {
      g_mutex_unlock (&mem->lock);
      return FALSE;
    }
  }

  if (!mem->memory_refcount[plane]) {
    /* first map of this plane: lock and map the Mali EGL image with an
     * access mode derived from the requested map flags */
    EGLint attribs[] = {
      MALI_EGL_IMAGE_PLANE, MALI_EGL_IMAGE_PLANE_Y,
      MALI_EGL_IMAGE_ACCESS_MODE, MALI_EGL_IMAGE_ACCESS_READ_WRITE,
      EGL_NONE
    };

    if ((flags & GST_MAP_READ) && (flags & GST_MAP_WRITE))
      attribs[3] = MALI_EGL_IMAGE_ACCESS_READ_WRITE;
    else if ((flags & GST_MAP_READ))
      attribs[3] = MALI_EGL_IMAGE_ACCESS_READ_ONLY;
    else if ((flags & GST_MAP_WRITE))
      attribs[3] = MALI_EGL_IMAGE_ACCESS_WRITE_ONLY;

    mem->memory_platform_data[plane] =
        mali_egl_image_lock_ptr (mem->image[plane]);
    if (!mem->memory_platform_data[plane]) {
      GST_ERROR ("Failed to lock Mali EGL image: 0x%04x",
          mali_egl_image_get_error ());
      goto map_error;
    }

    mem->memory[plane] =
        mali_egl_image_map_buffer (mem->memory_platform_data[plane], attribs);
    if (!mem->memory[plane])
      goto map_error;

    mem->memory_flags[plane] = flags;
  } else {
    /* Only multiple READ maps are allowed */
    if ((mem->memory_flags[plane] & GST_MAP_WRITE)) {
      g_mutex_unlock (&mem->lock);
      return FALSE;
    }
  }

  mem->memory_refcount[plane]++;
  gst_video_info_set_format (&vinfo, mem->format, mem->width, mem->height);

  *data = mem->memory[plane];
  *stride = mem->stride[plane];

  g_mutex_unlock (&mem->lock);
  return TRUE;

map_error:
  {
    EGLint attribs[] = {
      MALI_EGL_IMAGE_PLANE, MALI_EGL_IMAGE_PLANE_Y,
      EGL_NONE
    };

    GST_ERROR ("Failed to map Mali EGL image: 0x%04x",
        mali_egl_image_get_error ());

    /* roll back the lock if it succeeded before the map failed */
    if (mem->memory_platform_data[plane]) {
      mali_egl_image_unmap_buffer (mem->image[plane], attribs);
      mali_egl_image_unlock_ptr (mem->image[plane]);
    }

    mem->memory[plane] = NULL;
    mem->memory_platform_data[plane] = NULL;

    g_mutex_unlock (&mem->lock);

    return FALSE;
  }
}
/* Fill @outbuf with the decoded frame held in @mfc_outbuf.
 *
 * Two paths, chosen per buffer:
 *  - zerocopy: all planes of @outbuf are physically contiguous, so FIMC
 *    converts directly into the output frame's plane memory; cropping is
 *    expressed via a GstVideoCropMeta when downstream supports it.
 *  - mmap: FIMC converts into its own mmap'd buffers (self->dst) and the
 *    result is memcpy'd row by row into @outbuf, honouring the frame's
 *    strides.
 * Switching between the two modes reconfigures FIMC (create, set dst
 * format, request buffers); self->mmap tracks which mode FIMC is in.
 *
 * Returns GST_FLOW_OK or GST_FLOW_ERROR (with an element error posted,
 * except for fimc_create_error where gst_mfc_dec_create_fimc is assumed
 * to have posted one).
 */
static GstFlowReturn
gst_mfc_dec_fill_outbuf (GstMFCDec * self, GstBuffer * outbuf,
    struct mfc_buffer *mfc_outbuf, GstVideoCodecState * state)
{
  GstFlowReturn ret = GST_FLOW_OK;
  const guint8 *mfc_outbuf_comps[3] = { NULL, };
  gint i, j, h, w, src_stride, dst_stride;
  guint8 *dst_, *src_;
  GstVideoFrame vframe;
  Fimc *fimc = self->fimc;
  gboolean zerocopy, has_cropping;

  memset (&vframe, 0, sizeof (vframe));

  /* zerocopy requires every memory block of the output buffer to be
   * physically contiguous */
  zerocopy = TRUE;
  /* FIXME: Not 100% correct, we need the memory of each
   * plane to be contiguous at least */
  if (GST_VIDEO_INFO_N_PLANES (&state->info) > gst_buffer_n_memory (outbuf)) {
    zerocopy = FALSE;
  } else {
    gint n = gst_buffer_n_memory (outbuf);

    for (i = 0; i < n; i++) {
      GstMemory *mem = gst_buffer_peek_memory (outbuf, i);

      if (!GST_MEMORY_IS_PHYSICALLY_CONTIGUOUS (mem)) {
        zerocopy = FALSE;
        break;
      }
    }
  }

  has_cropping = self->has_cropping && (self->width != self->crop_width
      || self->height != self->crop_height);

  /* We only do cropping if we do zerocopy and downstream
   * supports cropping. For non-zerocopy we can do cropping
   * more efficient.
   * We can't do cropping ourself with zerocopy because
   * FIMC returns EFAULT when queueing the destination
   * buffers */
  if (zerocopy && has_cropping) {
    GstVideoCropMeta *crop;

    crop = gst_buffer_add_video_crop_meta (outbuf);
    crop->x = self->crop_left;
    crop->y = self->crop_top;
    crop->width = self->crop_width;
    crop->height = self->crop_height;
  }

  if (!gst_video_frame_map (&vframe, &state->info, outbuf, GST_MAP_WRITE))
    goto frame_map_error;

  mfc_buffer_get_output_data (mfc_outbuf, (void **) &mfc_outbuf_comps[0],
      (void **) &mfc_outbuf_comps[1]);

  if (zerocopy && (has_cropping || (self->width == self->crop_width
              && self->height == self->crop_height))) {
    void *dst[3];

    /* reconfigure FIMC if it is missing or still in mmap mode */
    if (self->mmap || !self->fimc) {
      if (!gst_mfc_dec_create_fimc (self, state))
        goto fimc_create_error;

      fimc = self->fimc;

      if (self->format == GST_VIDEO_FORMAT_NV12) {
        self->dst_stride[0] = GST_ROUND_UP_4 (self->width);
        self->dst_stride[1] = GST_ROUND_UP_4 (self->width);
        self->dst_stride[2] = 0;
      } else {
        self->dst_stride[0] = GST_ROUND_UP_4 (self->width);
        self->dst_stride[1] = GST_ROUND_UP_4 ((self->width + 1) / 2);
        self->dst_stride[2] = GST_ROUND_UP_4 ((self->width + 1) / 2);
      }

      /* with downstream cropping the full frame is converted and the crop
       * meta added above does the cropping; otherwise FIMC crops */
      if (has_cropping) {
        if (fimc_set_dst_format (fimc, self->fimc_format, self->width,
                self->height, self->dst_stride, 0, 0, self->width,
                self->height) < 0)
          goto fimc_dst_error;
      } else {
        if (fimc_set_dst_format (fimc, self->fimc_format, self->width,
                self->height, self->dst_stride, self->crop_left,
                self->crop_top, self->crop_width, self->crop_height) < 0)
          goto fimc_dst_error;
      }
      self->mmap = FALSE;

      if (fimc_request_dst_buffers (fimc) < 0)
        goto fimc_dst_requestbuffers_error;

      self->dst[0] = NULL;
      self->dst[1] = NULL;
      self->dst[2] = NULL;
    }

    /* convert straight into the output frame's plane memory */
    dst[0] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0);
    dst[1] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 1);
    if (self->format == GST_VIDEO_FORMAT_NV12)
      dst[2] = NULL;
    else
      dst[2] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, 2);

    if (fimc_convert (fimc, (void **) mfc_outbuf_comps, (void **) dst) < 0)
      goto fimc_convert_error;
  } else {
    /* reconfigure FIMC if it is missing or still in zerocopy mode */
    if (!self->mmap || !self->fimc) {
      if (!gst_mfc_dec_create_fimc (self, state))
        goto fimc_create_error;

      self->dst_stride[0] = 0;
      self->dst_stride[1] = 0;
      self->dst_stride[2] = 0;
      self->mmap = TRUE;

      fimc = self->fimc;
    }

    if (!self->dst[0]) {
      if (fimc_set_dst_format (fimc, self->fimc_format, self->width,
              self->height, self->dst_stride, self->crop_left, self->crop_top,
              self->crop_width, self->crop_height) < 0)
        goto fimc_dst_error;

      if (fimc_request_dst_buffers_mmap (fimc, self->dst, self->dst_stride) < 0)
        goto fimc_dst_requestbuffers_error;
    }

    if (fimc_convert (fimc, (void **) mfc_outbuf_comps,
            (void **) self->dst) < 0)
      goto fimc_convert_error;

    /* copy the converted planes row by row into the output frame,
     * translating between FIMC strides and the frame's strides */
    switch (state->info.finfo->format) {
      case GST_VIDEO_FORMAT_RGBx:
        dst_ = (guint8 *) GST_VIDEO_FRAME_COMP_DATA (&vframe, 0);
        src_ = self->dst[0];
        src_stride = self->dst_stride[0];
        h = GST_VIDEO_FRAME_HEIGHT (&vframe);
        w = GST_VIDEO_FRAME_WIDTH (&vframe);
        dst_stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, 0);
        for (i = 0; i < h; i++) {
          memcpy (dst_, src_, w);
          dst_ += dst_stride;
          src_ += src_stride;
        }
        break;
      case GST_VIDEO_FORMAT_I420:
      case GST_VIDEO_FORMAT_YV12:
        for (j = 0; j < 3; j++) {
          dst_ = (guint8 *) GST_VIDEO_FRAME_COMP_DATA (&vframe, j);
          src_ = self->dst[j];
          src_stride = self->dst_stride[j];
          h = GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, j);
          w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, j);
          dst_stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, j);
          for (i = 0; i < h; i++) {
            memcpy (dst_, src_, w);
            dst_ += dst_stride;
            src_ += src_stride;
          }
        }
        break;
      case GST_VIDEO_FORMAT_NV12:
        /* plane 1 interleaves Cb/Cr, hence the doubled row width */
        for (j = 0; j < 2; j++) {
          dst_ = (guint8 *) GST_VIDEO_FRAME_PLANE_DATA (&vframe, j);
          src_ = self->dst[j];
          src_stride = self->dst_stride[j];
          h = GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, j);
          w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, j) * (j == 0 ? 1 : 2);
          dst_stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, j);
          for (i = 0; i < h; i++) {
            memcpy (dst_, src_, w);
            dst_ += dst_stride;
            src_ += src_stride;
          }
        }
        break;
      default:
        g_assert_not_reached ();
        break;
    }
  }

done:
  if (vframe.buffer)
    gst_video_frame_unmap (&vframe);

  return ret;

frame_map_error:
  {
    GST_ELEMENT_ERROR (self, CORE, FAILED, ("Failed to map output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

fimc_create_error:
  {
    ret = GST_FLOW_ERROR;
    goto done;
  }

fimc_dst_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to set FIMC destination parameters"), (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

fimc_dst_requestbuffers_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to request FIMC destination buffers"), (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }

fimc_convert_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to convert via FIMC"), (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }
}