/* Allocate a new GstDucatiBuffer from @pool: back it with a 1D ducati
 * allocation of pool->size bytes and wrap that memory for PVR2D.
 * The buffer takes a reference on @pool.  On PVR2DMemWrap failure the
 * buffer is still returned, with self->wrapped left FALSE. */
static GstDucatiBuffer *
gst_ducati_buffer_new (GstPvrBufferPool * pool)
{
  PVR2DERROR pvr_error;
  GstDucatiBuffer *self = (GstDucatiBuffer *)
      gst_mini_object_new (GST_TYPE_DUCATIBUFFER);

  GST_LOG_OBJECT (pool->element, "creating buffer %p in pool %p", self, pool);

  self->pool = (GstPvrBufferPool *)
      gst_mini_object_ref (GST_MINI_OBJECT (pool));

  GST_BUFFER_DATA (self) = gst_ducati_alloc_1d (pool->size);
  GST_BUFFER_SIZE (self) = pool->size;
  GST_LOG_OBJECT (pool->element, "width=%d, height=%d and size=%d",
      pool->padded_width, pool->padded_height, pool->size);

  pvr_error = PVR2DMemWrap (pool->pvr_context, GST_BUFFER_DATA (self), 0,
      pool->size, NULL, &(self->src_mem));
  if (pvr_error != PVR2D_OK) {
    /* fixed: the two adjacent string literals used to concatenate to
     * "...buffer memoryreturned %d" — a space was missing */
    GST_LOG_OBJECT (pool->element, "Failed to Wrap buffer memory "
        "returned %d", pvr_error);
  } else {
    self->wrapped = TRUE;
  }

  gst_buffer_set_caps (GST_BUFFER (self), pool->caps);

  return self;
}
/* Verify gst_mini_object_make_writable(): must return the same object
 * when the refcount is 1 and a fresh copy when the refcount is > 1. */
void test_make_writable()
{
  GstBuffer *buffer;
  GstMiniObject *mobj, *mobj2, *mobj3;

  xmlfile = "gstminiobject_test_make_writable";
  std_log(LOG_FILENAME_LINE, "Test Started test_make_writable");

  buffer = gst_buffer_new_and_alloc (4);
  mobj = GST_MINI_OBJECT (buffer);

  /* refcount 1: make_writable must hand back the very same object */
  mobj2 = gst_mini_object_make_writable (mobj);
  fail_unless (GST_IS_BUFFER (mobj2), "make_writable did not return a buffer");
  fail_unless (mobj == mobj2,
      "make_writable returned a copy for a buffer with refcount 1");

  /* refcount 2: make_writable must copy and drop one ref from the original */
  mobj2 = gst_mini_object_ref (mobj);
  mobj3 = gst_mini_object_make_writable (mobj);
  fail_unless (GST_IS_BUFFER (mobj3), "make_writable did not return a buffer");
  fail_if (mobj == mobj3,
      "make_writable returned same object for a buffer with refcount > 1");
  fail_unless (GST_MINI_OBJECT_REFCOUNT_VALUE (mobj) == 1,
      "refcount of original mobj object should be back to 1");

  /* back to refcount 1: same object again */
  mobj2 = gst_mini_object_make_writable (mobj);
  fail_unless (GST_IS_BUFFER (mobj2), "make_writable did not return a buffer");
  fail_unless (mobj == mobj2,
      "make_writable returned a copy for a buffer with refcount 1");

  /* fixed: release the writable copy and the original object so the
   * test does not leak them (both hold exactly one reference here) */
  gst_mini_object_unref (mobj3);
  gst_mini_object_unref (mobj);

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
/* Stress test for thread-safe unreffing: preload num_threads *
 * refs_per_thread extra references on one miniobject, let the worker
 * threads (thread_unref) drop them concurrently, then verify the
 * refcount landed back at exactly 1 before the final unref frees it. */
void test_unref_threaded()
{
  GstBuffer *buffer;
  GstMiniObject *mobj;
  int i;

  xmlfile = "gstminiobject_test_unref_threaded";
  std_log(LOG_FILENAME_LINE, "Test Started test_unref_threaded");

  buffer = gst_buffer_new_and_alloc (4);
  mobj = GST_MINI_OBJECT (buffer);

  /* take all the references the worker threads are going to release */
  for (i = 0; i < num_threads * refs_per_thread; ++i)
    gst_mini_object_ref (mobj);

  MAIN_START_THREADS (num_threads, thread_unref, mobj);

  MAIN_STOP_THREADS ();

  ASSERT_MINI_OBJECT_REFCOUNT (mobj, "miniobject", 1);

  /* final unref */
  gst_mini_object_unref (mobj);

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
/**
 * gst_mini_object_replace:
 * @olddata: pointer to a pointer to a mini-object to be replaced
 * @newdata: pointer to new mini-object
 *
 * Modifies a pointer to point to a new mini-object. The modification
 * is done atomically, and the reference counts are updated correctly.
 * Either @newdata and the value pointed to by @olddata may be NULL.
 */
void
gst_mini_object_replace (GstMiniObject ** olddata, GstMiniObject * newdata)
{
  GstMiniObject *olddata_val;

  g_return_if_fail (olddata != NULL);

#ifdef DEBUG_REFCOUNT
  GST_CAT_LOG (GST_CAT_REFCOUNTING, "replace %p (%d) with %p (%d)",
      *olddata, *olddata ? (*olddata)->refcount : 0,
      newdata, newdata ? newdata->refcount : 0);
#endif

  olddata_val = g_atomic_pointer_get ((gpointer *) olddata);

  if (olddata_val == newdata)
    return;

  /* ref before publishing so no reader can ever see an unreffed object */
  if (newdata)
    gst_mini_object_ref (newdata);

  while (!g_atomic_pointer_compare_and_exchange ((gpointer *) olddata,
          olddata_val, newdata)) {
    olddata_val = g_atomic_pointer_get ((gpointer *) olddata);
    /* fixed: if another thread raced us and already stored newdata, stop
     * retrying instead of performing a pointless newdata->newdata swap;
     * the unref below then just drops the reference taken above.  This
     * matches the later gboolean variant of this function. */
    if (olddata_val == newdata)
      break;
  }

  if (olddata_val)
    gst_mini_object_unref (olddata_val);
}
void test_is_writable() { GstBuffer *buffer; GstMiniObject *mobj; xmlfile = "gstminiobject_test_is_writable"; std_log(LOG_FILENAME_LINE, "Test Started test_is_writable"); buffer = gst_buffer_new_and_alloc (4); mobj = GST_MINI_OBJECT (buffer); fail_unless (gst_mini_object_is_writable (mobj), "A buffer with one ref should be writable"); GST_MINI_OBJECT_FLAG_SET (mobj, GST_MINI_OBJECT_FLAG_READONLY); fail_if (gst_mini_object_is_writable (mobj), "A buffer with READONLY set should not be writable"); GST_MINI_OBJECT_FLAG_UNSET (mobj, GST_MINI_OBJECT_FLAG_READONLY); fail_unless (gst_mini_object_is_writable (mobj), "A buffer with one ref and READONLY not set should be writable"); fail_if (gst_mini_object_ref (mobj) == NULL, "Could not ref the mobj"); fail_if (gst_mini_object_is_writable (mobj), "A buffer with two refs should not be writable"); std_log(LOG_FILENAME_LINE, "Test Successful"); create_xml(0); }
/**
 * gst_mini_object_replace:
 * @olddata: (inout) (transfer full): pointer to a pointer to a mini-object to
 *     be replaced
 * @newdata: pointer to new mini-object
 *
 * Atomically modifies a pointer to point to a new mini-object.
 * The reference count of @olddata is decreased and the reference count of
 * @newdata is increased.
 *
 * Either @newdata and the value pointed to by @olddata may be NULL.
 *
 * Returns: TRUE if @newdata was different from @olddata
 */
gboolean
gst_mini_object_replace (GstMiniObject ** olddata, GstMiniObject * newdata)
{
  GstMiniObject *olddata_val;

  g_return_val_if_fail (olddata != NULL, FALSE);

  GST_CAT_TRACE (GST_CAT_REFCOUNTING, "replace %p (%d) with %p (%d)",
      *olddata, *olddata ? (*olddata)->refcount : 0,
      newdata, newdata ? newdata->refcount : 0);

  olddata_val = g_atomic_pointer_get ((gpointer *) olddata);

  /* nothing to do if the pointer already holds newdata */
  if (G_UNLIKELY (olddata_val == newdata))
    return FALSE;

  /* take the reference BEFORE publishing newdata, so a concurrent reader
   * of *olddata can never observe an object without a reference held */
  if (newdata)
    gst_mini_object_ref (newdata);

  /* lock-free swap: retry with a fresh snapshot until the CAS wins */
  while (G_UNLIKELY (!g_atomic_pointer_compare_and_exchange ((gpointer *)
              olddata, olddata_val, newdata))) {
    olddata_val = g_atomic_pointer_get ((gpointer *) olddata);
    /* another thread installed newdata concurrently; stop retrying — the
     * unref below then merely cancels the reference taken above */
    if (G_UNLIKELY (olddata_val == newdata))
      break;
  }

  /* release the reference previously held through *olddata */
  if (olddata_val)
    gst_mini_object_unref (olddata_val);

  return olddata_val != newdata;
}
/* Build a GstOverlayBuffer around externally supplied memory (@data of
 * @size bytes).  The buffer keeps a reference on @pool, records its slot
 * @index, is marked read-only and carries @caps. */
static GstOverlayBuffer *
gst_overlay_buffer_new (GstOverlayBufferPool * pool, guint index,
    GstCaps * caps, guint8 *data,guint32 size)
{
  GstOverlayBuffer *buf;

  buf = (GstOverlayBuffer *) gst_mini_object_new (GST_TYPE_OVERLAY_BUFFER);

  buf->index = index;
  buf->pool =
      (GstOverlayBufferPool *) gst_mini_object_ref (GST_MINI_OBJECT (pool));

  GST_BUFFER_SIZE (buf) = size;
  GST_BUFFER_DATA (buf) = data;
  GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_READONLY);
  gst_buffer_set_caps (GST_BUFFER (buf), caps);

  GST_DEBUG_OBJECT (pool->overlayelem,
      "creating buffer %u, %p in pool %p,size %u ,buffer address is %p",
      index, buf, pool, GST_BUFFER_SIZE (buf), GST_BUFFER_DATA (buf));

  return buf;
}
/**
 * gst_value_dup_mini_object:
 * @value: a valid #GValue of %GST_TYPE_MINI_OBJECT derived type
 *
 * Get the contents of a %GST_TYPE_MINI_OBJECT derived #GValue,
 * increasing its reference count.
 *
 * Returns: mini object contents of @value
 *
 * Since: 0.10.20
 */
GstMiniObject *
gst_value_dup_mini_object (const GValue * value)
{
  GstMiniObject *object;

  g_return_val_if_fail (GST_VALUE_HOLDS_MINI_OBJECT (value), NULL);

  object = value->data[0].v_pointer;
  return gst_mini_object_ref (object);
}
void MiniObject::ref(bool increaseRef)
{
    // Register this wrapper in the object store; bail out if it was
    // already registered.  Only bump the underlying GStreamer refcount
    // when the caller asked for it.
    if (!Private::ObjectStore::put(this))
        return;

    if (increaseRef)
        gst_mini_object_ref(GST_MINI_OBJECT(m_object));
}
/* Common render path for buffers and buffer-lists: enqueue @data on the
 * appsink queue (blocking or dropping when max_buffers is reached), then
 * notify the application via callbacks or the new-buffer signal.
 * Returns GST_FLOW_WRONG_STATE when the sink is flushing. */
static GstFlowReturn
gst_app_sink_render_common (GstBaseSink * psink, GstMiniObject * data,
    gboolean is_list)
{
  GstAppSink *appsink = GST_APP_SINK (psink);
  gboolean emit;

  g_mutex_lock (appsink->priv->mutex);
  if (appsink->priv->flushing)
    goto flushing;

  GST_DEBUG_OBJECT (appsink, "pushing render buffer%s %p on queue (%d)",
      is_list ? " list" : "", data, appsink->priv->queue->length);

  /* enforce the max_buffers limit (0 means unlimited) */
  while (appsink->priv->max_buffers > 0 &&
      appsink->priv->queue->length >= appsink->priv->max_buffers) {
    if (appsink->priv->drop) {
      GstMiniObject *obj;

      /* we need to drop the oldest buffer/list and try again */
      obj = g_queue_pop_head (appsink->priv->queue);
      GST_DEBUG_OBJECT (appsink, "dropping old buffer/list %p", obj);
      gst_mini_object_unref (obj);
    } else {
      GST_DEBUG_OBJECT (appsink, "waiting for free space, length %d >= %d",
          appsink->priv->queue->length, appsink->priv->max_buffers);
      /* wait for a buffer to be removed or flush */
      g_cond_wait (appsink->priv->cond, appsink->priv->mutex);
      if (appsink->priv->flushing)
        goto flushing;
    }
  }

  /* we need to ref the buffer when pushing it in the queue */
  g_queue_push_tail (appsink->priv->queue, gst_mini_object_ref (data));
  g_cond_signal (appsink->priv->cond);
  /* snapshot emit_signals while still holding the lock; the signal
   * itself is emitted after unlocking to avoid re-entrancy deadlocks */
  emit = appsink->priv->emit_signals;
  g_mutex_unlock (appsink->priv->mutex);

  if (is_list) {
    if (appsink->priv->callbacks.new_buffer_list)
      appsink->priv->callbacks.new_buffer_list (appsink,
          appsink->priv->user_data);
  } else {
    /* callbacks take precedence over the signal */
    if (appsink->priv->callbacks.new_buffer)
      appsink->priv->callbacks.new_buffer (appsink, appsink->priv->user_data);
    else if (emit)
      g_signal_emit (appsink, gst_app_sink_signals[SIGNAL_NEW_BUFFER], 0);
  }
  return GST_FLOW_OK;

flushing:
  {
    GST_DEBUG_OBJECT (appsink, "we are flushing");
    g_mutex_unlock (appsink->priv->mutex);
    return GST_FLOW_WRONG_STATE;
  }
}
/* GValue copy function for mini-object values: take a fresh reference
 * when the source holds an object, otherwise propagate NULL. */
static void
gst_value_mini_object_copy (const GValue * src_value, GValue * dest_value)
{
  gpointer contents = src_value->data[0].v_pointer;

  dest_value->data[0].v_pointer = contents ?
      gst_mini_object_ref (GST_MINI_OBJECT_CAST (contents)) : NULL;
}
/* GValue collect function: store a referenced mini-object (or NULL) from
 * the varargs collect values.  Always succeeds, hence returns NULL. */
static gchar *
gst_value_mini_object_collect (GValue * value, guint n_collect_values,
    GTypeCValue * collect_values, guint collect_flags)
{
  gpointer obj = collect_values[0].v_pointer;

  value->data[0].v_pointer = obj ? gst_mini_object_ref (obj) : NULL;

  return NULL;
}
/* test thread-safe refcounting of GstMiniObject */

/* Worker routine: take refs_per_thread references on @mobj, yielding to
 * the other threads periodically to provoke refcounting races. */
static void
thread_ref (GstMiniObject * mobj)
{
  int i;

  THREAD_START ();

  for (i = 0; i < refs_per_thread; ++i) {
    gst_mini_object_ref (mobj);

    if (i % num_threads == 0) {
      THREAD_SWITCH ();
    }
  }

  GST_DEBUG ("thread stopped");
}
/* Wrap a GstEvent in a Ruby object: pick the Ruby class matching the
 * event type, take a reference on the event (released later by
 * _rbgst_mini_object_free) and hand the pointer to the Ruby wrapper. */
static VALUE
instance2robj(gpointer instance)
{
    VALUE klass;
    GstEvent *event;

    event = instance;
    /* map the GStreamer event type to its dedicated Ruby class;
     * unknown types fall back to the generic rb_cGstEvent */
    switch (GST_EVENT_TYPE(event)) {
      case GST_EVENT_FLUSH_START:
        klass = rb_cGstEventFlushStart;
        break;
      case GST_EVENT_FLUSH_STOP:
        klass = rb_cGstEventFlushStop;
        break;
      case GST_EVENT_EOS:
        klass = rb_cGstEventEOS;
        break;
      case GST_EVENT_NEWSEGMENT:
        klass = rb_cGstEventNewSegment;
        break;
      case GST_EVENT_TAG:
        klass = rb_cGstEventTag;
        break;
      case GST_EVENT_BUFFERSIZE:
        klass = rb_cGstEventBufferSize;
        break;
      case GST_EVENT_QOS:
        klass = rb_cGstEventQOS;
        break;
      case GST_EVENT_SEEK:
        klass = rb_cGstEventSeek;
        break;
      case GST_EVENT_NAVIGATION:
        klass = rb_cGstEventNavigation;
        break;
      case GST_EVENT_LATENCY:
        klass = rb_cGstEventLatency;
        break;
      default:
        klass = rb_cGstEvent;
        break;
    }
    /* the Ruby wrapper owns one reference; freed via _rbgst_mini_object_free */
    gst_mini_object_ref(instance);
    return Data_Wrap_Struct(klass, NULL, _rbgst_mini_object_free, instance);
}
/* GValue lcopy function: copy the value's mini-object pointer out to the
 * caller-supplied location, reffing it unless G_VALUE_NOCOPY_CONTENTS is
 * set.  Returns an error string (caller frees) if the location is NULL. */
static gchar *
gst_value_mini_object_lcopy (const GValue * value, guint n_collect_values,
    GTypeCValue * collect_values, guint collect_flags)
{
  gpointer *mini_object_p = collect_values[0].v_pointer;
  gpointer contents;

  if (mini_object_p == NULL) {
    return g_strdup_printf ("value location for '%s' passed as NULL",
        G_VALUE_TYPE_NAME (value));
  }

  contents = value->data[0].v_pointer;
  if (contents == NULL)
    *mini_object_p = NULL;
  else if ((collect_flags & G_VALUE_NOCOPY_CONTENTS) != 0)
    *mini_object_p = contents;
  else
    *mini_object_p = gst_mini_object_ref (contents);

  return NULL;
}
/* Allocate a GstBufferClassBuffer of @sz bytes for slot @idx, holding a
 * reference on @pool.  Returns NULL if the mini-object allocation fails.
 * NOTE(review): @buf_paddr is currently unused here — presumably consumed
 * by the caller or a later revision; kept for interface compatibility. */
static GstBufferClassBuffer *
gst_bcbuffer_new (GstBufferClassBufferPool * pool,
    int idx, int sz, unsigned long buf_paddr)
{
  GstBufferClassBuffer *ret;

  ret = (GstBufferClassBuffer *) gst_mini_object_new (GST_TYPE_BCBUFFER);
  if (ret == NULL)
    /* fixed: the old fail path called gst_mini_object_unref (NULL),
     * which is invalid — just report the failure directly */
    return NULL;

  ret->pool = GST_BCBUFFERPOOL (gst_mini_object_ref (GST_MINI_OBJECT (pool)));
  ret->index = idx;

  GST_LOG_OBJECT (pool->elem, "creating buffer %u (sz=%d), %p in pool %p",
      idx, sz, ret, pool);

  GST_BUFFER_SIZE (ret) = sz;

  return ret;
}
/* Called once a mini-object's refcount has dropped to zero: run the
 * subclass finalize, allowing it to "resurrect" the object by handing
 * out a new reference, and only release the instance memory when the
 * refcount really returns to zero afterwards. */
static void
gst_mini_object_free (GstMiniObject * mini_object)
{
  GstMiniObjectClass *mo_class;

  /* At this point, the refcount of the object is 0. We increase the refcount
   * here because if a subclass recycles the object and gives out a new
   * reference we don't want to free the instance anymore. */
  gst_mini_object_ref (mini_object);
  mo_class = GST_MINI_OBJECT_GET_CLASS (mini_object);
  mo_class->finalize (mini_object);

  /* decrement the refcount again, if the subclass recycled the object we don't
   * want to free the instance anymore */
  if (G_LIKELY (g_atomic_int_dec_and_test (&mini_object->refcount))) {
#ifndef GST_DISABLE_TRACE
    gst_alloc_trace_free (_gst_mini_object_trace, mini_object);
#endif
    g_type_free_instance ((GTypeInstance *) mini_object);
  }
}
/* Wrap a GstQuery in a Ruby object: pick the Ruby class matching the
 * query type, take a reference on the query (released later by
 * _rbgst_mini_object_free) and hand the pointer to the Ruby wrapper. */
static VALUE
instance2robj(gpointer instance, G_GNUC_UNUSED gpointer user_data)
{
    VALUE klass;
    GstQuery *query;

    query = instance;
    /* map the GStreamer query type to its dedicated Ruby class;
     * unknown types fall back to the generic rb_cGstQuery */
    switch (GST_QUERY_TYPE(query)) {
      case GST_QUERY_POSITION:
        klass = rb_cGstQueryPosition;
        break;
      case GST_QUERY_DURATION:
        klass = rb_cGstQueryDuration;
        break;
      case GST_QUERY_LATENCY:
        klass = rb_cGstQueryLatency;
        break;
      case GST_QUERY_SEEKING:
        klass = rb_cGstQuerySeeking;
        break;
      case GST_QUERY_SEGMENT:
        klass = rb_cGstQuerySegment;
        break;
      case GST_QUERY_CONVERT:
        klass = rb_cGstQueryConvert;
        break;
      case GST_QUERY_FORMATS:
        klass = rb_cGstQueryFormats;
        break;
      default:
        klass = rb_cGstQuery;
        break;
    }
    /* the Ruby wrapper owns one reference; freed via _rbgst_mini_object_free */
    gst_mini_object_ref(instance);
    return Data_Wrap_Struct(klass, NULL, _rbgst_mini_object_free, instance);
}
/* Wrap a GstMessage in a Ruby object: pick the Ruby class matching the
 * message type (with a special case for missing-plugin element
 * messages), take a reference on the message (released later by
 * _rbgst_mini_object_free) and hand the pointer to the Ruby wrapper. */
static VALUE
instance2robj(gpointer instance)
{
    VALUE klass;
    GstMessage *message;

    message = instance;
    /* map the GStreamer message type to its dedicated Ruby class;
     * unknown types fall back to the generic rb_cGstMessage */
    switch (GST_MESSAGE_TYPE(message)) {
      case GST_MESSAGE_UNKNOWN:
        klass = rb_cGstMessageUnknown;
        break;
      case GST_MESSAGE_EOS:
        klass = rb_cGstMessageEos;
        break;
      case GST_MESSAGE_ERROR:
        klass = rb_cGstMessageError;
        break;
      case GST_MESSAGE_WARNING:
        klass = rb_cGstMessageWarning;
        break;
      case GST_MESSAGE_INFO:
        klass = rb_cGstMessageInfo;
        break;
      case GST_MESSAGE_TAG:
        klass = rb_cGstMessageTag;
        break;
      case GST_MESSAGE_BUFFERING:
        klass = rb_cGstMessageBuffering;
        break;
      case GST_MESSAGE_STATE_CHANGED:
        klass = rb_cGstMessageStateChanged;
        break;
      case GST_MESSAGE_STATE_DIRTY:
        klass = rb_cGstMessageStateDirty;
        break;
      case GST_MESSAGE_STEP_DONE:
        klass = rb_cGstMessageStepDone;
        break;
      case GST_MESSAGE_CLOCK_PROVIDE:
        klass = rb_cGstMessageClockProvide;
        break;
      case GST_MESSAGE_CLOCK_LOST:
        klass = rb_cGstMessageClockLost;
        break;
      case GST_MESSAGE_NEW_CLOCK:
        klass = rb_cGstMessageNewClock;
        break;
      case GST_MESSAGE_STRUCTURE_CHANGE:
        klass = rb_cGstMessageStructureChange;
        break;
      case GST_MESSAGE_STREAM_STATUS:
        klass = rb_cGstMessageStreamStatus;
        break;
      case GST_MESSAGE_APPLICATION:
        klass = rb_cGstMessageApplication;
        break;
      case GST_MESSAGE_ELEMENT:
        /* element messages carrying a missing-plugin payload get their
         * own wrapper class */
        if (gst_is_missing_plugin_message(message)) {
            klass = rb_cGstMissingMessage;
        } else {
            klass = rb_cGstMessageElement;
        }
        break;
      case GST_MESSAGE_SEGMENT_START:
        klass = rb_cGstMessageSegmentStart;
        break;
      case GST_MESSAGE_SEGMENT_DONE:
        klass = rb_cGstMessageSegmentDone;
        break;
      case GST_MESSAGE_DURATION:
        klass = rb_cGstMessageDuration;
        break;
      case GST_MESSAGE_LATENCY:
        klass = rb_cGstMessageLatency;
        break;
      case GST_MESSAGE_ASYNC_START:
        klass = rb_cGstMessageAsyncStart;
        break;
      case GST_MESSAGE_ASYNC_DONE:
        klass = rb_cGstMessageAsyncDone;
        break;
      case GST_MESSAGE_ANY:
        klass = rb_cGstMessageAny;
        break;
      default:
        klass = rb_cGstMessage;
        break;
    }
    /* the Ruby wrapper owns one reference; freed via _rbgst_mini_object_free */
    gst_mini_object_ref(instance);
    return Data_Wrap_Struct(klass, NULL, _rbgst_mini_object_free, instance);
}
/* Allocate and initialize one mmap'ed V4L2 buffer for @pool: ask the
 * driver (VIDIOC_QUERYBUF) for the description of the buffer at @index,
 * mmap its memory, and wrap it in a read-only GstV4l2Buffer that holds a
 * reference on the pool.  Returns NULL on QUERYBUF or mmap failure, with
 * errno preserved for the caller. */
static GstV4l2Buffer *
gst_v4l2_buffer_new (GstV4l2BufferPool * pool, guint index, GstCaps * caps)
{
  GstV4l2Buffer *ret;
  guint8 *data;

  ret = (GstV4l2Buffer *) gst_mini_object_new (GST_TYPE_V4L2_BUFFER);

  GST_LOG_OBJECT (pool->v4l2elem, "creating buffer %u, %p in pool %p", index,
      ret, pool);

  ret->pool =
      (GstV4l2BufferPool *) gst_mini_object_ref (GST_MINI_OBJECT (pool));

  ret->vbuffer.index = index;
  ret->vbuffer.type = pool->type;
  ret->vbuffer.memory = V4L2_MEMORY_MMAP;

  /* let the driver fill in length/offset for this buffer slot */
  if (v4l2_ioctl (pool->video_fd, VIDIOC_QUERYBUF, &ret->vbuffer) < 0)
    goto querybuf_failed;

  GST_LOG_OBJECT (pool->v4l2elem, "  index:     %u", ret->vbuffer.index);
  GST_LOG_OBJECT (pool->v4l2elem, "  type:      %d", ret->vbuffer.type);
  GST_LOG_OBJECT (pool->v4l2elem, "  bytesused: %u", ret->vbuffer.bytesused);
  GST_LOG_OBJECT (pool->v4l2elem, "  flags:     %08x", ret->vbuffer.flags);
  GST_LOG_OBJECT (pool->v4l2elem, "  field:     %d", ret->vbuffer.field);
  GST_LOG_OBJECT (pool->v4l2elem, "  memory:    %d", ret->vbuffer.memory);
  if (ret->vbuffer.memory == V4L2_MEMORY_MMAP)
    GST_LOG_OBJECT (pool->v4l2elem, "  MMAP offset:  %u",
        ret->vbuffer.m.offset);
  GST_LOG_OBJECT (pool->v4l2elem, "  length:    %u", ret->vbuffer.length);
  GST_LOG_OBJECT (pool->v4l2elem, "  input:     %u", ret->vbuffer.input);

  /* map the driver-owned memory into our address space */
  data = (guint8 *) v4l2_mmap (0, ret->vbuffer.length,
      PROT_READ | PROT_WRITE, MAP_SHARED, pool->video_fd,
      ret->vbuffer.m.offset);

  if (data == MAP_FAILED)
    goto mmap_failed;

  GST_BUFFER_DATA (ret) = data;
  GST_BUFFER_SIZE (ret) = ret->vbuffer.length;

  GST_BUFFER_FLAG_SET (ret, GST_BUFFER_FLAG_READONLY);

  gst_buffer_set_caps (GST_BUFFER (ret), caps);

  return ret;

  /* ERRORS */
querybuf_failed:
  {
    /* save errno across the logging/unref calls so the caller sees the
     * original failure cause */
    gint errnosave = errno;

    GST_WARNING ("Failed QUERYBUF: %s", g_strerror (errnosave));
    gst_buffer_unref (GST_BUFFER (ret));
    errno = errnosave;
    return NULL;
  }
mmap_failed:
  {
    gint errnosave = errno;

    GST_WARNING ("Failed to mmap: %s", g_strerror (errnosave));
    gst_buffer_unref (GST_BUFFER (ret));
    errno = errnosave;
    return NULL;
  }
}
/* "Copy" implementation for EGLImage mini-objects: since the underlying
 * image is shared rather than duplicated, copying is just taking another
 * reference on the same object. */
static GstMiniObject *
_gst_egl_image_copy (GstMiniObject * obj)
{
  GstMiniObject *copy;

  copy = gst_mini_object_ref (obj);
  return copy;
}
/**
 * gst_date_time_ref:
 * @datetime: a #GstDateTime
 *
 * Atomically increments the reference count of @datetime by one.
 *
 * Return value: (transfer full): the reference @datetime
 */
GstDateTime *
gst_date_time_ref (GstDateTime * datetime)
{
  GstMiniObject *mini_object = GST_MINI_OBJECT_CAST (datetime);

  return (GstDateTime *) gst_mini_object_ref (mini_object);
}