/**
 * gst_object_replace:
 * @oldobj: (inout) (transfer full): pointer to a place of a #GstObject to
 *     replace
 * @newobj: (transfer none): a new #GstObject
 *
 * Unrefs the #GstObject pointed to by @oldobj, refs @newobj and
 * puts @newobj in *@oldobj. Be careful when calling this
 * function, it does not take any locks. You might want to lock
 * the object owning @oldobj pointer before calling this
 * function.
 *
 * Make sure not to LOCK @oldobj because it might be unreffed
 * which could cause a deadlock when it is disposed.
 *
 * Since 0.10.36, this function operates atomically.
 */
void
gst_object_replace (GstObject ** oldobj, GstObject * newobj)
{
  GstObject *oldptr;

  g_return_if_fail (oldobj != NULL);
  g_return_if_fail (*oldobj == NULL || GST_IS_OBJECT (*oldobj));
  g_return_if_fail (newobj == NULL || GST_IS_OBJECT (newobj));

#ifdef DEBUG_REFCOUNT
  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "replace %p %s (%d) with %p %s (%d)",
      *oldobj, *oldobj ? GST_STR_NULL (GST_OBJECT_NAME (*oldobj)) : "(NONE)",
      *oldobj ? G_OBJECT (*oldobj)->ref_count : 0,
      newobj, newobj ? GST_STR_NULL (GST_OBJECT_NAME (newobj)) : "(NONE)",
      newobj ? G_OBJECT (newobj)->ref_count : 0);
#endif

  /* take the new reference before publishing the pointer so a concurrent
   * reader never observes an unowned object */
  if (newobj)
    g_object_ref (newobj);

  do {
    /* read the current value atomically; a plain *oldobj dereference in a
     * CAS retry loop is a data race the compiler may tear or hoist */
    oldptr = g_atomic_pointer_get ((gpointer *) oldobj);
  } while (!G_ATOMIC_POINTER_COMPARE_AND_EXCHANGE (oldobj, oldptr, newobj));

  /* drop the reference that *oldobj held */
  if (oldptr)
    g_object_unref (oldptr);
}
/**
 * gst_mini_object_unlock:
 * @object: the mini-object to unlock
 * @flags: #GstLockFlags
 *
 * Unlock the mini-object with the specified access mode in @flags.
 */
void
gst_mini_object_unlock (GstMiniObject * object, GstLockFlags flags)
{
  gint access_mode, state, newstate;

  g_return_if_fail (object != NULL);
  g_return_if_fail (GST_MINI_OBJECT_IS_LOCKABLE (object));

  /* lock-free update of the combined lockstate word: compute the new state
   * from a snapshot and retry on concurrent modification */
  do {
    access_mode = flags & FLAG_MASK;
    newstate = state = g_atomic_int_get (&object->lockstate);

    GST_CAT_TRACE (GST_CAT_LOCKING, "unlock %p: state %08x, access_mode %d",
        object, state, access_mode);

    if (access_mode & GST_LOCK_FLAG_EXCLUSIVE) {
      /* shared counter: dropping an exclusive lock decrements the share
       * count; it must have been taken before */
      g_return_if_fail (state >= SHARE_ONE);
      newstate -= SHARE_ONE;
      access_mode &= ~GST_LOCK_FLAG_EXCLUSIVE;
    }

    if (access_mode) {
      /* the mode being unlocked must match the currently-held mode */
      g_return_if_fail ((state & access_mode) == access_mode);
      /* decrease the refcount */
      newstate -= LOCK_ONE;
      /* last refcount, unset access_mode */
      if ((newstate & LOCK_FLAG_MASK) == access_mode)
        newstate &= ~LOCK_FLAG_MASK;
    }
  } while (!g_atomic_int_compare_and_exchange (&object->lockstate, state,
          newstate));
}
/* Create and configure a 2D GL texture as described by @data, storing the
 * resulting texture id in data->result.  Must run on the GL thread. */
static void
_generate_texture (GstGLContext * context, GenTexture * data)
{
  const GstGLFuncs *gl = context->gl_vtable;
  GLenum sized_format;
  GLuint tex_id = 0;

  GST_CAT_TRACE (GST_CAT_GL_MEMORY,
      "Generating texture format:%u type:%u dimensions:%ux%u", data->gl_format,
      data->gl_type, data->width, data->height);

  sized_format =
      _sized_gl_format_from_gl_format_type (data->gl_format, data->gl_type);

  /* allocate storage for the texture with no initial pixel data */
  gl->GenTextures (1, &tex_id);
  gl->BindTexture (GL_TEXTURE_2D, tex_id);
  gl->TexImage2D (GL_TEXTURE_2D, 0, sized_format, data->width, data->height,
      0, data->gl_format, data->gl_type, NULL);

  /* linear filtering, clamp at the edges */
  gl->TexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  gl->TexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  gl->TexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  gl->TexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

  data->result = tex_id;

  GST_CAT_LOG (GST_CAT_GL_MEMORY, "generated texture id:%d", data->result);
}
/**
 * gst_object_replace:
 * @oldobj: (inout) (transfer full): pointer to a place of a #GstObject to
 *     replace
 * @newobj: (transfer none): a new #GstObject
 *
 * Atomically modifies a pointer to point to a new object.
 * The reference count of @oldobj is decreased and the reference count of
 * @newobj is increased.
 *
 * Either @newobj and the value pointed to by @oldobj may be NULL.
 *
 * Returns: TRUE if @newobj was different from @oldobj
 */
gboolean
gst_object_replace (GstObject ** oldobj, GstObject * newobj)
{
  GstObject *oldptr;

  g_return_val_if_fail (oldobj != NULL, FALSE);

#ifdef DEBUG_REFCOUNT
  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "replace %p %s (%d) with %p %s (%d)",
      *oldobj, *oldobj ? GST_STR_NULL (GST_OBJECT_NAME (*oldobj)) : "(NONE)",
      *oldobj ? G_OBJECT (*oldobj)->ref_count : 0,
      newobj, newobj ? GST_STR_NULL (GST_OBJECT_NAME (newobj)) : "(NONE)",
      newobj ? G_OBJECT (newobj)->ref_count : 0);
#endif

  oldptr = g_atomic_pointer_get ((gpointer *) oldobj);

  /* fast path: nothing to do when the pointer already holds @newobj */
  if (G_UNLIKELY (oldptr == newobj))
    return FALSE;

  /* take the new reference before publishing the pointer */
  if (newobj)
    gst_object_ref (newobj);

  /* CAS loop: retry with a fresh snapshot until the swap succeeds, or stop
   * early if another thread already installed @newobj */
  while (G_UNLIKELY (!g_atomic_pointer_compare_and_exchange ((gpointer *)
              oldobj, oldptr, newobj))) {
    oldptr = g_atomic_pointer_get ((gpointer *) oldobj);
    if (G_UNLIKELY (oldptr == newobj))
      break;
  }

  /* drop the reference previously held through *oldobj */
  if (oldptr)
    gst_object_unref (oldptr);

  return oldptr != newobj;
}
/**
 * gst_mini_object_replace:
 * @olddata: (inout) (transfer full): pointer to a pointer to a mini-object to
 *     be replaced
 * @newdata: pointer to new mini-object
 *
 * Atomically modifies a pointer to point to a new mini-object.
 * The reference count of @olddata is decreased and the reference count of
 * @newdata is increased.
 *
 * Either @newdata and the value pointed to by @olddata may be NULL.
 *
 * Returns: TRUE if @newdata was different from @olddata
 */
gboolean
gst_mini_object_replace (GstMiniObject ** olddata, GstMiniObject * newdata)
{
  GstMiniObject *olddata_val;

  g_return_val_if_fail (olddata != NULL, FALSE);

  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "replace %p (%d) with %p (%d)",
      *olddata, *olddata ? (*olddata)->refcount : 0,
      newdata, newdata ? newdata->refcount : 0);

  olddata_val = g_atomic_pointer_get ((gpointer *) olddata);

  /* fast path: nothing to do when the pointer already holds @newdata */
  if (G_UNLIKELY (olddata_val == newdata))
    return FALSE;

  /* take the new reference before publishing the pointer */
  if (newdata)
    gst_mini_object_ref (newdata);

  /* CAS loop: retry with a fresh snapshot until the swap succeeds, or stop
   * early if another thread already installed @newdata */
  while (G_UNLIKELY (!g_atomic_pointer_compare_and_exchange ((gpointer *)
              olddata, olddata_val, newdata))) {
    olddata_val = g_atomic_pointer_get ((gpointer *) olddata);
    if (G_UNLIKELY (olddata_val == newdata))
      break;
  }

  /* drop the reference previously held through *olddata */
  if (olddata_val)
    gst_mini_object_unref (olddata_val);

  return olddata_val != newdata;
}
static VkBool32 _gst_vk_debug_callback (VkDebugReportFlagsEXT msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg, void *pUserData) { if (msgFlags & VK_DEBUG_REPORT_ERROR_BIT_EXT) { GST_CAT_ERROR (GST_VULKAN_DEBUG_CAT, "[%s] Code %d : %s", pLayerPrefix, msgCode, pMsg); } else if (msgFlags & VK_DEBUG_REPORT_WARNING_BIT_EXT) { GST_CAT_WARNING (GST_VULKAN_DEBUG_CAT, "[%s] Code %d : %s", pLayerPrefix, msgCode, pMsg); } else if (msgFlags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT) { GST_CAT_LOG (GST_VULKAN_DEBUG_CAT, "[%s] Code %d : %s", pLayerPrefix, msgCode, pMsg); } else if (msgFlags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) { GST_CAT_FIXME (GST_VULKAN_DEBUG_CAT, "[%s] Code %d : %s", pLayerPrefix, msgCode, pMsg); } else if (msgFlags & VK_DEBUG_REPORT_DEBUG_BIT_EXT) { GST_CAT_TRACE (GST_VULKAN_DEBUG_CAT, "[%s] Code %d : %s", pLayerPrefix, msgCode, pMsg); } else { return FALSE; } /* * false indicates that layer should not bail-out of an * API call that had validation failures. This may mean that the * app dies inside the driver due to invalid parameter(s). * That's what would happen without validation layers, so we'll * keep that behavior here. */ return FALSE; }
/* GstAllocator free vfunc for GstGLBaseMemory: destroy the GL objects on
 * the GL thread, release CPU-side resources and drop the context ref. */
static void
_mem_free (GstAllocator * allocator, GstMemory * memory)
{
  GstGLBaseMemory *gl_mem = (GstGLBaseMemory *) memory;

  GST_CAT_TRACE (GST_CAT_GL_BASE_MEMORY, "freeing buffer memory:%p", gl_mem);

  /* GL resources can only be destroyed from the GL thread */
  gst_gl_context_thread_add (gl_mem->context,
      (GstGLContextThreadFunc) _destroy_gl_objects, gl_mem);

  g_mutex_clear (&gl_mem->lock);

  /* frees and NULLs alloc_data in one step; no-op when already NULL */
  g_clear_pointer (&gl_mem->alloc_data, g_free);
  gl_mem->data = NULL;

  if (gl_mem->notify)
    gl_mem->notify (gl_mem->user_data);

  gst_object_unref (gl_mem->context);

  g_free (memory);
}
/**
 * gst_mini_object_lock:
 * @object: the mini-object to lock
 * @flags: #GstLockFlags
 *
 * Lock the mini-object with the specified access mode in @flags.
 *
 * Returns: %TRUE if @object could be locked.
 */
gboolean
gst_mini_object_lock (GstMiniObject * object, GstLockFlags flags)
{
  gint access_mode, state, newstate;

  g_return_val_if_fail (object != NULL, FALSE);
  g_return_val_if_fail (GST_MINI_OBJECT_IS_LOCKABLE (object), FALSE);

  /* a read-only object can never be locked for writing */
  if (G_UNLIKELY (object->flags & GST_MINI_OBJECT_FLAG_LOCK_READONLY &&
          flags & GST_LOCK_FLAG_WRITE))
    return FALSE;

  /* lock-free update of the combined lockstate word: compute the new state
   * from a snapshot and retry on concurrent modification */
  do {
    access_mode = flags & FLAG_MASK;
    newstate = state = g_atomic_int_get (&object->lockstate);

    GST_CAT_TRACE (GST_CAT_LOCKING, "lock %p: state %08x, access_mode %d",
        object, state, access_mode);

    if (access_mode & GST_LOCK_FLAG_EXCLUSIVE) {
      /* shared ref */
      newstate += SHARE_ONE;
      access_mode &= ~GST_LOCK_FLAG_EXCLUSIVE;
    }

    /* shared counter > 1 and write access is not allowed */
    if (((state & GST_LOCK_FLAG_WRITE) != 0
            || (access_mode & GST_LOCK_FLAG_WRITE) != 0)
        && IS_SHARED (newstate))
      goto lock_failed;

    if (access_mode) {
      if ((state & LOCK_FLAG_MASK) == 0) {
        /* nothing mapped, set access_mode */
        newstate |= access_mode;
      } else {
        /* access_mode must match */
        if ((state & access_mode) != access_mode)
          goto lock_failed;
      }
      /* increase refcount */
      newstate += LOCK_ONE;
    }
  } while (!g_atomic_int_compare_and_exchange (&object->lockstate, state,
          newstate));

  return TRUE;

lock_failed:
  {
    GST_CAT_DEBUG (GST_CAT_LOCKING,
        "lock failed %p: state %08x, access_mode %d", object, state,
        access_mode);
    return FALSE;
  }
}
/* Opportunistically copy the texture contents into the PBO so a later CPU
 * map can avoid a blocking glReadPixels.  Runs on the GL thread. */
static void
_download_transfer (GstGLContext * context, GstGLMemoryPBO * gl_mem)
{
  GstGLBaseMemory *mem = (GstGLBaseMemory *) gl_mem;

  g_mutex_lock (&mem->lock);
  if (_read_pixels_to_pbo (gl_mem)) {
    GST_CAT_TRACE (GST_CAT_GL_MEMORY, "optimistic download of texture %u "
        "using pbo %u", gl_mem->mem.tex_id, gl_mem->pbo->id);
    /* the PBO now holds the texture contents: clear the download-needed
     * flag so a subsequent map does not read the texture back a second
     * time (matches the other _download_transfer implementation) */
    GST_MEMORY_FLAG_UNSET (gl_mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD);
  }
  g_mutex_unlock (&mem->lock);
}
/* Opportunistically copy the texture contents into the PBO so a later CPU
 * map can avoid a blocking glReadPixels.  Runs on the GL thread. */
static void
_download_transfer (GstGLContext * context, GstGLMemoryPBO * gl_mem)
{
  GstGLBaseMemory *base = (GstGLBaseMemory *) gl_mem;

  g_mutex_lock (&base->lock);

  if (_read_pixels_to_pbo (gl_mem)) {
    GST_CAT_TRACE (GST_CAT_GL_MEMORY, "optimistic download of texture %u "
        "using pbo %u", gl_mem->mem.tex_id, gl_mem->pbo->id);
    /* the PBO is now up to date, no further download required */
    GST_MEMORY_FLAG_UNSET (gl_mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD);
  }

  g_mutex_unlock (&base->lock);
}
/**
 * gst_buffer_copy_metadata:
 * @dest: a destination #GstBuffer
 * @src: a source #GstBuffer
 * @flags: flags indicating what metadata fields should be copied.
 *
 * Copies the metadata from @src into @dest. The data, size and mallocdata
 * fields are not copied.
 *
 * @flags indicate which fields will be copied. Use #GST_BUFFER_COPY_ALL to copy
 * all the metadata fields.
 *
 * This function is typically called from a custom buffer copy function after
 * creating @dest and setting the data, size, mallocdata.
 *
 * Since: 0.10.13
 */
void
gst_buffer_copy_metadata (GstBuffer * dest, const GstBuffer * src,
    GstBufferCopyFlags flags)
{
  guint flag_mask;

  g_return_if_fail (dest != NULL);
  g_return_if_fail (src != NULL);

  /* copying a buffer onto itself is a no-op */
  if (G_UNLIKELY (dest == src))
    return;

#if GST_VERSION_NANO == 1
  /* we enable this extra debugging in git versions only for now */
  g_warn_if_fail (gst_buffer_is_metadata_writable (dest));
#endif

  GST_CAT_LOG (GST_CAT_BUFFER, "copy %p to %p", src, dest);

  if (flags & GST_BUFFER_COPY_FLAGS) {
    /* only the buffer flags that carry meaning across copies */
    flag_mask = GST_BUFFER_FLAG_PREROLL | GST_BUFFER_FLAG_IN_CAPS |
        GST_BUFFER_FLAG_DELTA_UNIT | GST_BUFFER_FLAG_DISCONT |
        GST_BUFFER_FLAG_GAP | GST_BUFFER_FLAG_MEDIA1 |
        GST_BUFFER_FLAG_MEDIA2 | GST_BUFFER_FLAG_MEDIA3;
    GST_MINI_OBJECT_FLAGS (dest) |= GST_MINI_OBJECT_FLAGS (src) & flag_mask;
  }

  if (flags & GST_BUFFER_COPY_TIMESTAMPS) {
    GST_BUFFER_TIMESTAMP (dest) = GST_BUFFER_TIMESTAMP (src);
    GST_BUFFER_DURATION (dest) = GST_BUFFER_DURATION (src);
    GST_BUFFER_OFFSET (dest) = GST_BUFFER_OFFSET (src);
    GST_BUFFER_OFFSET_END (dest) = GST_BUFFER_OFFSET_END (src);
  }

  if (flags & GST_BUFFER_COPY_CAPS)
    gst_caps_replace (&GST_BUFFER_CAPS (dest), GST_BUFFER_CAPS (src));

  if (flags & GST_BUFFER_COPY_QDATA) {
    GST_CAT_TRACE (GST_CAT_BUFFER, "copying qdata from %p to %p", src, dest);
    gst_buffer_copy_qdata (dest, src);
  }
}
/**
 * gst_mini_object_ref:
 * @mini_object: the mini-object
 *
 * Increase the reference count of the mini-object.
 *
 * Note that the refcount affects the writability
 * of @mini-object, see gst_mini_object_is_writable(). It is
 * important to note that keeping additional references to
 * GstMiniObject instances can potentially increase the number
 * of memcpy operations in a pipeline, especially if the miniobject
 * is a #GstBuffer.
 *
 * Returns: (transfer full): the mini-object.
 */
GstMiniObject *
gst_mini_object_ref (GstMiniObject * mini_object)
{
  gint count;

  g_return_val_if_fail (mini_object != NULL, NULL);
  /* we can't assert that the refcount > 0 since the _free functions
   * increments the refcount from 0 to 1 again to allow resurecting
   * the object
   g_return_val_if_fail (mini_object->refcount > 0, NULL);
   */

  count = GST_MINI_OBJECT_REFCOUNT_VALUE (mini_object);
  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "%p ref %d->%d", mini_object,
      count, count + 1);

  g_atomic_int_inc (&mini_object->refcount);

  return mini_object;
}
/* GstAllocator free vfunc for GstVulkanMemory: release the device memory
 * (unless it wraps caller-owned memory) and drop the device reference. */
static void
_vk_mem_free (GstAllocator * allocator, GstMemory * memory)
{
  GstVulkanMemory *vk_mem = (GstVulkanMemory *) memory;

  GST_CAT_TRACE (GST_CAT_VULKAN_MEMORY, "freeing buffer memory:%p "
      "id:%" G_GUINT64_FORMAT, vk_mem, (guint64) vk_mem->mem_ptr);

  g_mutex_clear (&vk_mem->lock);

  if (vk_mem->notify)
    vk_mem->notify (vk_mem->user_data);

  /* wrapped memory belongs to the caller; only free what we allocated */
  if (vk_mem->mem_ptr && !vk_mem->wrapped)
    vkFreeMemory (vk_mem->device->device, vk_mem->mem_ptr, NULL);

  gst_object_unref (vk_mem->device);
}
/**
 * gst_event_new_gap:
 * @timestamp: the start time (pts) of the gap
 * @duration: the duration of the gap
 *
 * Create a new GAP event. A gap event can be thought of as conceptually
 * equivalent to a buffer to signal that there is no data for a certain
 * amount of time. This is useful to signal a gap to downstream elements
 * which may wait for data, such as muxers or mixers or overlays, especially
 * for sparse streams such as subtitle streams.
 *
 * Returns: (transfer full): the new GAP event.
 */
GstEvent *
gst_event_new_gap (GstClockTime timestamp, GstClockTime duration)
{
  GstStructure *structure;

  g_return_val_if_fail (GST_CLOCK_TIME_IS_VALID (timestamp), NULL);

  GST_CAT_TRACE (GST_CAT_EVENT, "creating gap %" GST_TIME_FORMAT " - "
      "%" GST_TIME_FORMAT " (duration: %" GST_TIME_FORMAT ")",
      GST_TIME_ARGS (timestamp), GST_TIME_ARGS (timestamp + duration),
      GST_TIME_ARGS (duration));

  /* the gap is carried as a structure with start time and duration */
  structure = gst_structure_new_id (GST_QUARK (EVENT_GAP),
      GST_QUARK (TIMESTAMP), GST_TYPE_CLOCK_TIME, timestamp,
      GST_QUARK (DURATION), GST_TYPE_CLOCK_TIME, duration, NULL);

  return gst_event_new_custom (GST_EVENT_GAP, structure);
}
/* Map a GL texture's contents for CPU access through its backing PBO.
 * First transfers texture -> PBO on demand, then maps the PBO and returns
 * a CPU-visible data pointer; the per-map #GstMapInfo is stashed in
 * info->user_data[0] for the corresponding unmap.  Returns NULL on
 * failure. */
static gpointer
_pbo_download_transfer (GstGLMemoryPBO * gl_mem, GstMapInfo * info, gsize size)
{
  GstMapInfo *pbo_info;

  gl_mem->pbo->target = GL_PIXEL_PACK_BUFFER;

  /* texture -> pbo */
  if (info->flags & GST_MAP_READ
      && GST_MEMORY_FLAG_IS_SET (gl_mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD)) {
    /* renamed from 'info': it shadowed the @info parameter above, which is
     * easy to misread and triggers -Wshadow */
    GstMapInfo gl_info;

    GST_CAT_TRACE (GST_CAT_GL_MEMORY, "attempting download of texture %u "
        "using pbo %u", gl_mem->mem.tex_id, gl_mem->pbo->id);

    if (!gst_memory_map (GST_MEMORY_CAST (gl_mem->pbo), &gl_info,
            GST_MAP_WRITE | GST_MAP_GL)) {
      GST_CAT_WARNING (GST_CAT_GL_MEMORY, "Failed to write to PBO");
      return NULL;
    }

    if (!_read_pixels_to_pbo (gl_mem)) {
      gst_memory_unmap (GST_MEMORY_CAST (gl_mem->pbo), &gl_info);
      return NULL;
    }

    gst_memory_unmap (GST_MEMORY_CAST (gl_mem->pbo), &gl_info);
  }

  pbo_info = g_new0 (GstMapInfo, 1);

  /* pbo -> data */
  /* get a cpu accessible mapping from the pbo */
  if (!gst_memory_map (GST_MEMORY_CAST (gl_mem->pbo), pbo_info, info->flags)) {
    GST_CAT_ERROR (GST_CAT_GL_MEMORY, "Failed to map pbo");
    g_free (pbo_info);
    return NULL;
  }
  /* remember the PBO mapping so unmap can undo it */
  info->user_data[0] = pbo_info;

  return pbo_info->data;
}
/**
 * gst_mini_object_steal:
 * @olddata: (inout) (transfer full): pointer to a pointer to a mini-object to
 *     be stolen
 *
 * Replace the current #GstMiniObject pointer to by @olddata with NULL and
 * return the old value.
 *
 * Returns: the #GstMiniObject at @olddata
 */
GstMiniObject *
gst_mini_object_steal (GstMiniObject ** olddata)
{
  GstMiniObject *olddata_val;

  g_return_val_if_fail (olddata != NULL, NULL);

  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "steal %p (%d)",
      *olddata, *olddata ? (*olddata)->refcount : 0);

  /* atomically swap the current value for NULL; the reference previously
   * held through *olddata is transferred to the caller */
  do {
    olddata_val = g_atomic_pointer_get ((gpointer *) olddata);
    /* nothing to steal */
    if (olddata_val == NULL)
      break;
  } while (G_UNLIKELY (!g_atomic_pointer_compare_and_exchange ((gpointer *)
              olddata, olddata_val, NULL)));

  return olddata_val;
}
/* GstAllocator free vfunc for GstGLBaseBuffer: destroy the GL buffer object
 * on the GL thread, release CPU-side resources and drop the context ref. */
static void
_mem_free (GstAllocator * allocator, GstMemory * memory)
{
  GstGLBaseBuffer *buf = (GstGLBaseBuffer *) memory;

  GST_CAT_TRACE (GST_CAT_GL_BASE_BUFFER, "freeing buffer memory:%p id:%u",
      buf, buf->id);

  /* GL resources can only be destroyed from the GL thread */
  gst_gl_context_thread_add (buf->context,
      (GstGLContextThreadFunc) _destroy_gl_objects, buf);

  g_mutex_clear (&buf->lock);

  /* frees and NULLs alloc_data in one step; no-op when already NULL */
  g_clear_pointer (&buf->alloc_data, g_free);
  buf->data = NULL;

  gst_object_unref (buf->context);
}
/**
 * gst_mini_object_unref:
 * @mini_object: the mini-object
 *
 * Decreases the reference count of the mini-object, possibly freeing
 * the mini-object.
 */
void
gst_mini_object_unref (GstMiniObject * mini_object)
{
  g_return_if_fail (mini_object != NULL);

  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "%p unref %d->%d",
      mini_object,
      GST_MINI_OBJECT_REFCOUNT_VALUE (mini_object),
      GST_MINI_OBJECT_REFCOUNT_VALUE (mini_object) - 1);

  g_return_if_fail (mini_object->refcount > 0);

  if (G_UNLIKELY (g_atomic_int_dec_and_test (&mini_object->refcount))) {
    gboolean do_free;

    /* the dispose handler may revive (resurrect) the object by taking a new
     * reference and returning FALSE, in which case we must not free it */
    if (mini_object->dispose)
      do_free = mini_object->dispose (mini_object);
    else
      do_free = TRUE;

    /* if the subclass recycled the object (and returned FALSE) we don't
     * want to free the instance anymore */
    if (G_LIKELY (do_free)) {
      /* there should be no outstanding locks */
      g_return_if_fail ((g_atomic_int_get (&mini_object->lockstate) &
              LOCK_MASK) < 4);

      /* run any registered finalize notifies before releasing the qdata */
      if (mini_object->n_qdata) {
        call_finalize_notify (mini_object);
        g_free (mini_object->qdata);
      }
#ifndef GST_DISABLE_TRACE
      _gst_alloc_trace_free (_gst_mini_object_trace, mini_object);
#endif
      if (mini_object->free)
        mini_object->free (mini_object);
    }
  }
}
/* Deep-copy the per-buffer qdata structures from @src to @dest, reparenting
 * each copy onto @dest's refcount.  Does nothing when @src has no qdata. */
static void
gst_buffer_copy_qdata (GstBuffer * dest, const GstBuffer * src)
{
  GstBufferPrivate *dest_priv;
  GQueue copies = G_QUEUE_INIT;
  GList *iter;

  if (G_LIKELY (src->priv == NULL))
    return;

  for (iter = src->priv->qdata; iter != NULL; iter = iter->next) {
    GstStructure *copy = gst_structure_copy (iter->data);

    /* the copy is owned by @dest, so it follows dest's writability */
    gst_structure_set_parent_refcount (copy, &dest->mini_object.refcount);
    g_queue_push_tail (&copies, copy);

    GST_CAT_TRACE (GST_CAT_BUFFER, "copying qdata '%s' from buffer %p to %p",
        g_quark_to_string (copy->name), src, dest);
  }

  dest_priv = gst_buffer_ensure_priv (dest);
  dest_priv->qdata = copies.head;
}
/* Allocate the GL side of a GstGLBaseMemory by invoking the allocator
 * subclass' create vfunc on the GL thread; logs and clears any error. */
static void
_mem_create_gl (GstGLContext * context, struct create_data *transfer)
{
  GstGLBaseMemoryAllocatorClass *alloc_class;
  GError *error = NULL;

  GST_CAT_TRACE (GST_CAT_GL_BASE_MEMORY, "Create memory %p", transfer->mem);

  alloc_class =
      GST_GL_BASE_MEMORY_ALLOCATOR_GET_CLASS (transfer->mem->mem.allocator);

  g_return_if_fail (alloc_class->create != NULL);

  /* timing query used to measure GL transfer durations */
  transfer->mem->query = gst_gl_query_new (context, GST_GL_QUERY_TIME_ELAPSED);

  if (!(transfer->result = alloc_class->create (transfer->mem, &error))) {
    /* the create vfunc must set @error on failure */
    g_assert (error != NULL);

    GST_CAT_ERROR (GST_CAT_GL_BASE_MEMORY, "Failed to create GL buffer: %s",
        error->message);
    g_clear_error (&error);
  }
}