Example #1
static void
default_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
  GST_LOG_OBJECT (pool, "released buffer %p %d", buffer,
      GST_MINI_OBJECT_FLAGS (buffer));

  /* memory should be untouched */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY))
    goto discard;

  /* size should have been reset. This is not a catch-all; pools with
   * a size requirement per memory should do their own check. */
  if (gst_buffer_get_size (buffer) != pool->priv->size)
    goto discard;

  /* all memory should be exclusive to this buffer (and thus be writable) */
  if (!gst_buffer_is_all_memory_writable (buffer))
    goto discard;

  /* keep it around in our queue */
  gst_atomic_queue_push (pool->priv->queue, buffer);
  gst_poll_write_control (pool->priv->poll);

  return;

discard:
  {
    do_free_buffer (pool, buffer);
    return;
  }
}
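For context, here is a minimal sketch of the complementary acquire path, which is not part of the original snippet: it pops recycled buffers from the same lock-free queue and waits on the GstPoll control socket that gst_poll_write_control() signals above. The helper name and the direct priv access are illustrative assumptions.

/* Hypothetical sketch: the consumer side of the queue above. */
static GstBuffer *
sketch_acquire_buffer (GstBufferPool * pool)
{
  GstBuffer *buffer;

  /* gst_atomic_queue_pop returns NULL when the queue is empty */
  while ((buffer = gst_atomic_queue_pop (pool->priv->queue)) == NULL) {
    /* block until default_release_buffer writes the control socket */
    if (gst_poll_wait (pool->priv->poll, GST_CLOCK_TIME_NONE) < 0)
      return NULL;
  }
  /* consume the token written by gst_poll_write_control () */
  gst_poll_read_control (pool->priv->poll);

  return buffer;
}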
Example #2
static void
gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
{
  GstV4l2MemoryGroup *group = mem->group;

  GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
      mem->plane, group->buffer.index);

  switch (allocator->memory) {
    case V4L2_MEMORY_DMABUF:
      close (mem->dmafd);
      mem->dmafd = -1;
      break;
    case V4L2_MEMORY_USERPTR:
      mem->data = NULL;
      break;
    default:
      break;
  }

  /* When all memory is back, put the group back in the free queue */
  if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
    GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
    gst_atomic_queue_push (allocator->free_queue, group);
    g_signal_emit (allocator, gst_v4l2_allocator_signals[GROUP_RELEASED], 0);
  }

  /* Keep last, allocator may be freed after this call */
  g_object_unref (allocator);
}
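The g_object_unref() at the end mirrors a reference taken when each plane's memory was handed out. A hypothetical allocation-side fragment (the helper name is an assumption, not the actual v4l2 allocator code) would pair with it like this:

/* Hypothetical allocation-side counterpart: each GstV4l2Memory handed
 * out bumps the group's counter and holds one ref on the allocator,
 * which gst_v4l2_allocator_release() above drops again. */
static void
sketch_track_allocated (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  g_atomic_int_inc (&group->mems_allocated);
  g_object_ref (allocator);
}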
Example #3
static void
default_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
  /* keep it around in our queue */
  GST_LOG_OBJECT (pool, "released buffer %p", buffer);
  gst_atomic_queue_push (pool->priv->queue, buffer);
  gst_poll_write_control (pool->priv->poll);
}
Example #4
/* Emitted by vlcvideosink for every buffer,
 * Adds the buffer to the queue */
static void frame_handoff_cb( GstElement *p_ele, GstBuffer *p_buf,
        gpointer p_data )
{
    VLC_UNUSED( p_ele );
    decoder_t *p_dec = p_data;
    decoder_sys_t *p_sys = p_dec->p_sys;

    /* Push the buffer to the queue */
    gst_atomic_queue_push( p_sys->p_que, gst_buffer_ref( p_buf ) );
}
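A hypothetical drain helper for the queue filled by this callback (the function name is an assumption, not VLC code): each popped buffer drops the reference taken by gst_buffer_ref() in the handoff.

static void flush_frame_queue( decoder_sys_t *p_sys )
{
    GstBuffer *p_buf;

    /* pop until empty; release the ref taken in frame_handoff_cb */
    while( ( p_buf = gst_atomic_queue_pop( p_sys->p_que ) ) != NULL )
        gst_buffer_unref( p_buf );
}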
Example #5
static void
default_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
  GST_LOG_OBJECT (pool, "released buffer %p %d", buffer,
      GST_MINI_OBJECT_FLAGS (buffer));

  /* memory should be untouched */
  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY)))
    goto memory_tagged;

  /* size should have been reset. This is not a catch-all; pools with
   * a size requirement per memory should do their own check. */
  if (G_UNLIKELY (gst_buffer_get_size (buffer) != pool->priv->size))
    goto size_changed;

  /* all memory should be exclusive to this buffer (and thus be writable) */
  if (G_UNLIKELY (!gst_buffer_is_all_memory_writable (buffer)))
    goto not_writable;

  /* keep it around in our queue */
  gst_atomic_queue_push (pool->priv->queue, buffer);
  gst_poll_write_control (pool->priv->poll);

  return;

memory_tagged:
  {
    GST_CAT_DEBUG_OBJECT (GST_CAT_PERFORMANCE, pool,
        "discarding buffer %p: memory tag set", buffer);
    goto discard;
  }
size_changed:
  {
    GST_CAT_DEBUG_OBJECT (GST_CAT_PERFORMANCE, pool,
        "discarding buffer %p: size %" G_GSIZE_FORMAT " != %u",
        buffer, gst_buffer_get_size (buffer), pool->priv->size);
    goto discard;
  }
not_writable:
  {
    GST_CAT_DEBUG_OBJECT (GST_CAT_PERFORMANCE, pool,
        "discarding buffer %p: memory not writable", buffer);
    goto discard;
  }
discard:
  {
    do_free_buffer (pool, buffer);
    return;
  }
}
Example #6
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  if (group->mems_allocated > 0) {
    gint i;
    /* If one or more mmaps worked, we need to unref those memories;
     * otherwise they would keep a ref on the allocator and leak it.
     * Unreffing also puts the group back into the free_queue. */
    for (i = 0; i < group->n_mem; i++)
      gst_memory_unref (group->mem[i]);
  } else {
    /* Otherwise, group has to be on free queue for _stop() to work */
    gst_atomic_queue_push (allocator->free_queue, group);
  }
}
Example #7
guint
gst_v4l2_allocator_start (GstV4l2Allocator * allocator, guint32 count,
    guint32 memory)
{
  struct v4l2_requestbuffers breq = { count, allocator->type, memory };
  gboolean can_allocate;
  gint i;

  g_return_val_if_fail (count != 0, 0);

  GST_OBJECT_LOCK (allocator);

  if (g_atomic_int_get (&allocator->active))
    goto already_active;

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_REQBUFS, &breq) < 0)
    goto reqbufs_failed;

  if (breq.count < 1)
    goto out_of_memory;

  switch (memory) {
    case V4L2_MEMORY_MMAP:
      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, MMAP);
      break;
    case V4L2_MEMORY_USERPTR:
      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, USERPTR);
      break;
    case V4L2_MEMORY_DMABUF:
      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, DMABUF);
      break;
    default:
      can_allocate = FALSE;
      break;
  }

  GST_DEBUG_OBJECT (allocator, "allocated %u %s buffers out of %u requested",
      breq.count, memory_type_to_str (memory), count);

  allocator->can_allocate = can_allocate;
  allocator->count = breq.count;
  allocator->memory = memory;

  /* Create memory groups */
  for (i = 0; i < allocator->count; i++) {
    allocator->groups[i] = gst_v4l2_memory_group_new (allocator, i);
    if (allocator->groups[i] == NULL)
      goto error;

    gst_atomic_queue_push (allocator->free_queue, allocator->groups[i]);
  }

  g_atomic_int_set (&allocator->active, TRUE);

done:
  GST_OBJECT_UNLOCK (allocator);
  return breq.count;

already_active:
  {
    GST_ERROR_OBJECT (allocator, "allocator already active");
    goto error;
  }
reqbufs_failed:
  {
    GST_ERROR_OBJECT (allocator,
        "error requesting %d buffers: %s", count, g_strerror (errno));
    goto error;
  }
out_of_memory:
  {
    GST_ERROR_OBJECT (allocator, "Not enough memory to allocate buffers");
    goto error;
  }
error:
  {
    breq.count = 0;
    goto done;
  }
}
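Since every example here funnels through gst_atomic_queue_push(), a self-contained sketch of the GstAtomicQueue API may help; the values and the initial size are illustrative.

#include <gst/gst.h>

static void
atomic_queue_demo (void)
{
  /* the initial size is a hint; the queue grows as needed */
  GstAtomicQueue *queue = gst_atomic_queue_new (16);
  gpointer item;

  gst_atomic_queue_push (queue, GINT_TO_POINTER (1));
  gst_atomic_queue_push (queue, GINT_TO_POINTER (2));

  /* gst_atomic_queue_pop returns NULL once the queue is empty */
  while ((item = gst_atomic_queue_pop (queue)) != NULL)
    g_print ("popped %d\n", GPOINTER_TO_INT (item));

  gst_atomic_queue_unref (queue);
}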
Example #8
/**
 * gst_bus_post:
 * @bus: a #GstBus to post on
 * @message: (transfer full): the #GstMessage to post
 *
 * Post a message on the given bus. Ownership of the message
 * is taken by the bus.
 *
 * Returns: %TRUE if the message could be posted, %FALSE if the bus is flushing.
 *
 * MT safe.
 */
gboolean
gst_bus_post (GstBus * bus, GstMessage * message)
{
  GstBusSyncReply reply = GST_BUS_PASS;
  GstBusSyncHandler handler;
  gboolean emit_sync_message;
  gpointer handler_data;

  g_return_val_if_fail (GST_IS_BUS (bus), FALSE);
  g_return_val_if_fail (GST_IS_MESSAGE (message), FALSE);

  GST_DEBUG_OBJECT (bus, "[msg %p] posting on bus %" GST_PTR_FORMAT, message,
      message);

  /* check we didn't accidentally add a public flag that maps to same value */
  g_assert (!GST_MINI_OBJECT_FLAG_IS_SET (message,
          GST_MESSAGE_FLAG_ASYNC_DELIVERY));

  GST_OBJECT_LOCK (bus);
  /* check if the bus is flushing */
  if (GST_OBJECT_FLAG_IS_SET (bus, GST_BUS_FLUSHING))
    goto is_flushing;

  handler = bus->priv->sync_handler;
  handler_data = bus->priv->sync_handler_data;
  emit_sync_message = bus->priv->num_sync_message_emitters > 0;
  GST_OBJECT_UNLOCK (bus);

  /* first call the sync handler if it is installed */
  if (handler)
    reply = handler (bus, message, handler_data);

  /* emit sync-message if requested to do so via
     gst_bus_enable_sync_message_emission. terrible but effective */
  if (emit_sync_message && reply != GST_BUS_DROP
      && handler != gst_bus_sync_signal_handler)
    gst_bus_sync_signal_handler (bus, message, NULL);

  /* If this is a bus without async message delivery
   * always drop the message */
  if (!bus->priv->poll)
    reply = GST_BUS_DROP;

  /* now see what we should do with the message */
  switch (reply) {
    case GST_BUS_DROP:
      /* drop the message */
      GST_DEBUG_OBJECT (bus, "[msg %p] dropped", message);
      break;
    case GST_BUS_PASS:
      /* pass the message to the async queue, refcount passed in the queue */
      GST_DEBUG_OBJECT (bus, "[msg %p] pushing on async queue", message);
      gst_atomic_queue_push (bus->priv->queue, message);
      gst_poll_write_control (bus->priv->poll);
      GST_DEBUG_OBJECT (bus, "[msg %p] pushed on async queue", message);

      break;
    case GST_BUS_ASYNC:
    {
      /* async delivery, we need a mutex and a cond to block
       * on */
      GCond *cond = GST_MESSAGE_GET_COND (message);
      GMutex *lock = GST_MESSAGE_GET_LOCK (message);

      g_cond_init (cond);
      g_mutex_init (lock);

      GST_MINI_OBJECT_FLAG_SET (message, GST_MESSAGE_FLAG_ASYNC_DELIVERY);

      GST_DEBUG_OBJECT (bus, "[msg %p] waiting for async delivery", message);

      /* now we lock the message mutex, send the message to the async
       * queue. When the message is handled by the app and destroyed,
       * the cond will be signalled and we can continue */
      g_mutex_lock (lock);

      gst_atomic_queue_push (bus->priv->queue, message);
      gst_poll_write_control (bus->priv->poll);

      /* now block till the message is freed */
      g_cond_wait (cond, lock);

      /* we acquired a new ref from gst_message_dispose() so we can clean up */
      g_mutex_unlock (lock);

      GST_DEBUG_OBJECT (bus, "[msg %p] delivered asynchronously", message);

      GST_MINI_OBJECT_FLAG_UNSET (message, GST_MESSAGE_FLAG_ASYNC_DELIVERY);

      g_mutex_clear (lock);
      g_cond_clear (cond);

      gst_message_unref (message);
      break;
    }
    default:
      g_warning ("invalid return from bus sync handler");
      break;
  }
  return TRUE;

  /* ERRORS */
is_flushing:
  {
    GST_DEBUG_OBJECT (bus, "bus is flushing");
    GST_OBJECT_UNLOCK (bus);
    gst_message_unref (message);

    return FALSE;
  }
}
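A minimal usage sketch for the function above, assuming an element whose bus we fetch; gst_bus_post() takes ownership of the message, so the caller never unrefs it, whether delivery succeeds or the bus is flushing.

static gboolean
post_eos_message (GstElement * element)
{
  GstBus *bus = gst_element_get_bus (element);
  gboolean posted;

  /* the bus takes ownership of the EOS message */
  posted = gst_bus_post (bus, gst_message_new_eos (GST_OBJECT (element)));
  gst_object_unref (bus);

  return posted;
}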
Example #9
static void gst_scream_queue_srcpad_loop(GstScreamQueue *self)
{
    GstScreamDataQueueItem *item;
    GstScreamDataQueueRtpItem *rtp_item;
    GstScreamStream *stream;
    guint stream_id;
    guint64 time_now_us, time_until_next_approve = 0;
    GstBuffer *buffer;

    time_now_us = get_gst_time_us(self);
    if (G_UNLIKELY(time_now_us == 0)) {
        goto end;
    }

    if (time_now_us >= self->next_approve_time) {
        time_until_next_approve = gst_scream_controller_approve_transmits(self->scream_controller,
            time_now_us);
    } else {
        GST_LOG_OBJECT(self, "Time is %" G_GUINT64_FORMAT ", waiting %" G_GUINT64_FORMAT,
            time_now_us, self->next_approve_time);
    }

    /* Send all approved packets */
    while (!gst_data_queue_is_empty(self->approved_packets)) {
        if (G_UNLIKELY(!gst_data_queue_pop(self->approved_packets,
            (GstDataQueueItem **)&rtp_item))) {
            GST_WARNING_OBJECT(self, "Failed to pop from approved packets queue. Flushing?");
            goto end; /* flushing */
        }

        buffer = GST_BUFFER(((GstDataQueueItem *)rtp_item)->object);
        gst_pad_push(self->src_pad, buffer);

        GST_LOG_OBJECT(self, "pushing: pt = %u, seq: %u, pass: %u", rtp_item->rtp_pt, rtp_item->rtp_seq, self->pass_through);

        if (rtp_item->adapted) {
            guint tmp_time;
            stream_id = ((GstScreamDataQueueItem *)rtp_item)->rtp_ssrc;
            tmp_time = gst_scream_controller_packet_transmitted(self->scream_controller, stream_id,
                rtp_item->rtp_payload_size, rtp_item->rtp_seq, time_now_us);
            time_until_next_approve = MIN(time_until_next_approve, tmp_time);
        }
        g_slice_free(GstScreamDataQueueRtpItem, rtp_item);
    }
    self->next_approve_time = time_now_us + time_until_next_approve;

    GST_LOG_OBJECT(self, "Popping or waiting %" G_GUINT64_FORMAT, time_until_next_approve);
    item = (GstScreamDataQueueItem *)g_async_queue_timeout_pop(self->incoming_packets, time_until_next_approve);
    if (!item) {
        goto end;
    }

    stream_id = item->rtp_ssrc;
    if (item->type == GST_SCREAM_DATA_QUEUE_ITEM_TYPE_RTP) {
        GstScreamDataQueueRtpItem *rtp_item = (GstScreamDataQueueRtpItem *)item;
        stream = get_stream(self, item->rtp_ssrc, rtp_item->rtp_pt);
        if (!stream) {
            rtp_item->adapted = FALSE;
            GST_LOG_OBJECT(self, "!adapted, approving: pt = %u, seq: %u, pass: %u",
                    rtp_item->rtp_pt, rtp_item->rtp_seq, self->pass_through);
            gst_data_queue_push(self->approved_packets, (GstDataQueueItem *)item);
        } else {
            gst_atomic_queue_push(stream->packet_queue, rtp_item);
            stream->enqueued_payload_size += rtp_item->rtp_payload_size;
            stream->enqueued_packets++;
            rtp_item->adapted = TRUE;
            self->next_approve_time = 0;
            gst_scream_controller_new_rtp_packet(self->scream_controller, stream_id, rtp_item->rtp_ts,
                rtp_item->enqueued_time, stream->enqueued_payload_size, rtp_item->rtp_payload_size);
        }
    } else { /* item->type == GST_SCREAM_DATA_QUEUE_ITEM_TYPE_RTCP */
        GstScreamDataQueueRtcpItem *rtcp_item = (GstScreamDataQueueRtcpItem *)item;

        gst_scream_controller_incoming_feedback(self->scream_controller, stream_id, time_now_us,
            rtcp_item->timestamp, rtcp_item->highest_seq, rtcp_item->n_loss, rtcp_item->n_ecn, rtcp_item->qbit);

        ((GstDataQueueItem *)item)->destroy(item);
    }

end:
    return;
}