Example #1
static void
gst_base_video_decoder_init (GstBaseVideoDecoder * base_video_decoder,
    GstBaseVideoDecoderClass * klass)
{
  GstPad *pad;

  GST_DEBUG ("gst_base_video_decoder_init");

  pad = GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_decoder);

  gst_pad_set_activatepush_function (pad,
      gst_base_video_decoder_sink_activate_push);
  gst_pad_set_chain_function (pad, gst_base_video_decoder_chain);
  gst_pad_set_event_function (pad, gst_base_video_decoder_sink_event);
  gst_pad_set_setcaps_function (pad, gst_base_video_decoder_sink_setcaps);
  gst_pad_set_query_function (pad, gst_base_video_decoder_sink_query);

  pad = GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder);

  gst_pad_set_event_function (pad, gst_base_video_decoder_src_event);
  gst_pad_set_query_type_function (pad, gst_base_video_decoder_get_query_types);
  gst_pad_set_query_function (pad, gst_base_video_decoder_src_query);

  base_video_decoder->input_adapter = gst_adapter_new ();
  base_video_decoder->output_adapter = gst_adapter_new ();

  gst_segment_init (&base_video_decoder->state.segment, GST_FORMAT_TIME);
  gst_base_video_decoder_reset (base_video_decoder);

  base_video_decoder->current_frame =
      gst_base_video_decoder_new_frame (base_video_decoder);

  base_video_decoder->sink_clipping = TRUE;
}
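
The init function above only wires up pads and adapters; the decoding work is delegated to vfuncs on GstBaseVideoDecoderClass (start, flush, scan_for_sync, parse_data, finish) that the later examples invoke. A minimal sketch of how a subclass might fill those in, assuming a hypothetical GstFooDec type and the usual GST_BASE_VIDEO_DECODER_CLASS cast macro (all gst_foo_dec_* names are invented for illustration):

/* Sketch only: GstFooDec and its callbacks are hypothetical; the vfunc
 * fields match the ones the base class code in these examples calls. */
static GstFlowReturn gst_foo_dec_parse_data (GstBaseVideoDecoder * decoder,
    gboolean at_eos);
static int gst_foo_dec_scan_for_sync (GstBaseVideoDecoder * decoder,
    gboolean at_eos, int offset, int n);
static gboolean gst_foo_dec_start (GstBaseVideoDecoder * decoder);
static void gst_foo_dec_flush (GstBaseVideoDecoder * decoder);

static void
gst_foo_dec_class_init (GstFooDecClass * klass)
{
  GstBaseVideoDecoderClass *decoder_class =
      GST_BASE_VIDEO_DECODER_CLASS (klass);

  /* vfuncs invoked by the base class code shown in these examples */
  decoder_class->start = gst_foo_dec_start;
  decoder_class->flush = gst_foo_dec_flush;
  decoder_class->scan_for_sync = gst_foo_dec_scan_for_sync;
  decoder_class->parse_data = gst_foo_dec_parse_data;
}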
Example #2
static void
gst_base_video_decoder_finalize (GObject * object)
{
  GstBaseVideoDecoder *base_video_decoder;

  g_return_if_fail (GST_IS_BASE_VIDEO_DECODER (object));
  base_video_decoder = GST_BASE_VIDEO_DECODER (object);

  gst_base_video_decoder_reset (base_video_decoder);

  if (base_video_decoder->input_adapter) {
    g_object_unref (base_video_decoder->input_adapter);
    base_video_decoder->input_adapter = NULL;
  }
  if (base_video_decoder->output_adapter) {
    g_object_unref (base_video_decoder->output_adapter);
    base_video_decoder->output_adapter = NULL;
  }

  GST_DEBUG_OBJECT (object, "finalize");

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
Example #3
static void
gst_base_video_decoder_flush (GstBaseVideoDecoder * base_video_decoder)
{
  GstBaseVideoDecoderClass *base_video_decoder_class;

  gst_base_video_decoder_reset (base_video_decoder);

  base_video_decoder_class =
      GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  if (base_video_decoder_class->flush)
    base_video_decoder_class->flush (base_video_decoder);
}
Example #4
static gboolean
gst_base_video_decoder_start (GstBaseVideoDecoder * base_video_decoder)
{
  GstBaseVideoDecoderClass *base_video_decoder_class;

  GST_DEBUG ("start");

  base_video_decoder_class =
      GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  gst_base_video_decoder_reset (base_video_decoder);

  if (base_video_decoder_class->start)
    return base_video_decoder_class->start (base_video_decoder);

  return TRUE;
}
Example #5
static gboolean
gst_base_video_decoder_start (GstBaseVideoDecoder * base_video_decoder)
{
  GstBaseVideoDecoderClass *base_video_decoder_class;

  GST_DEBUG ("start");

  base_video_decoder_class =
      GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  gst_base_video_decoder_reset (base_video_decoder);
  gst_base_video_decoder_reset_state (&base_video_decoder->state);

  gst_segment_init (&base_video_decoder->segment, GST_FORMAT_TIME);

  if (base_video_decoder_class->start)
    return base_video_decoder_class->start (base_video_decoder);

  return TRUE;
}
Example #6
static GstFlowReturn
gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf)
{
  GstBaseVideoDecoder *base_video_decoder;
  GstBaseVideoDecoderClass *klass;
  GstBuffer *buffer;
  GstFlowReturn ret;

  GST_DEBUG ("chain %" G_GINT64_FORMAT, GST_BUFFER_TIMESTAMP (buf));

#if 0
  /* requiring the pad to be negotiated makes it impossible to use
   * oggdemux or filesrc ! decoder */
  if (!gst_pad_is_negotiated (pad)) {
    GST_DEBUG ("not negotiated");
    return GST_FLOW_NOT_NEGOTIATED;
  }
#endif

  base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));
  klass = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  GST_DEBUG_OBJECT (base_video_decoder, "chain");

  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) {
    GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer");
    if (base_video_decoder->started) {
      gst_base_video_decoder_reset (base_video_decoder);
    }
  }

  if (!base_video_decoder->started) {
    if (klass->start)
      klass->start (base_video_decoder);
    base_video_decoder->started = TRUE;
  }

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    GST_DEBUG ("timestamp %" GST_TIME_FORMAT " offset %" G_GINT64_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), base_video_decoder->offset);
    base_video_decoder->last_sink_timestamp = GST_BUFFER_TIMESTAMP (buf);
  }
  if (GST_BUFFER_OFFSET_END (buf) != -1) {
    GST_DEBUG ("gp %" G_GINT64_FORMAT, GST_BUFFER_OFFSET_END (buf));
    base_video_decoder->last_sink_offset_end = GST_BUFFER_OFFSET_END (buf);
  }
  base_video_decoder->offset += GST_BUFFER_SIZE (buf);

#if 0
  if (base_video_decoder->timestamp_offset == GST_CLOCK_TIME_NONE &&
      GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) {
    GST_DEBUG ("got new offset %lld", GST_BUFFER_TIMESTAMP (buf));
    base_video_decoder->timestamp_offset = GST_BUFFER_TIMESTAMP (buf);
  }
#endif

  if (base_video_decoder->current_frame == NULL) {
    base_video_decoder->current_frame =
        gst_base_video_decoder_new_frame (base_video_decoder);
  }

  gst_adapter_push (base_video_decoder->input_adapter, buf);

  if (!base_video_decoder->have_sync) {
    int n, m;

    GST_DEBUG ("no sync, scanning");

    n = gst_adapter_available (base_video_decoder->input_adapter);
    m = klass->scan_for_sync (base_video_decoder, FALSE, 0, n);

    if (m < 0) {
      g_warning ("subclass returned negative scan %d", m);
      m = 0;
    }

    if (m >= n) {
      g_warning ("subclass scanned past end %d >= %d", m, n);
      m = n;
    }

    gst_adapter_flush (base_video_decoder->input_adapter, m);

    if (m < n) {
      GST_DEBUG ("found possible sync after %d bytes (of %d)", m, n);

      /* this is only "maybe" sync */
      base_video_decoder->have_sync = TRUE;
    }

    if (!base_video_decoder->have_sync) {
      gst_object_unref (base_video_decoder);
      return GST_FLOW_OK;
    }
  }

  /* FIXME: use gst_adapter_prev_timestamp() here instead? */
  buffer = gst_adapter_get_buffer (base_video_decoder->input_adapter);

  base_video_decoder->buffer_timestamp = GST_BUFFER_TIMESTAMP (buffer);
  gst_buffer_unref (buffer);

  do {
    ret = klass->parse_data (base_video_decoder, FALSE);
  } while (ret == GST_FLOW_OK);

  if (ret == GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA) {
    gst_object_unref (base_video_decoder);
    return GST_FLOW_OK;
  }

  gst_object_unref (base_video_decoder);
  return ret;
}
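
The chain function drives the subclass by looping klass->parse_data until it stops returning GST_FLOW_OK, and it maps GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA back to GST_FLOW_OK, so a subclass asks for more input simply by returning that value. A sketch of a parse_data honouring this contract, assuming an invented 4-byte size-prefixed packet layout and the gst_base_video_decoder_add_to_frame()/gst_base_video_decoder_have_frame() helpers from the same base-class library (the GstFooDec naming is hypothetical):

static GstFlowReturn
gst_foo_dec_parse_data (GstBaseVideoDecoder * decoder, gboolean at_eos)
{
  guint avail;
  guint8 header[4];
  guint payload_size;

  avail = gst_adapter_available (decoder->input_adapter);

  /* Not even a full (invented) 4-byte header buffered yet: tell the caller
   * to wait; the chain loop treats NEED_DATA as GST_FLOW_OK. (at_eos would
   * let a real subclass discard a truncated tail instead; this sketch just
   * stops.) */
  if (avail < 4)
    return GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA;

  gst_adapter_copy (decoder->input_adapter, header, 0, 4);
  payload_size = (header[1] << 16) | (header[2] << 8) | header[3];

  if (avail < 4 + payload_size)
    return GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA;

  /* One complete unit is available: append it to the current frame and let
   * the base class pass the frame on for decoding. */
  gst_base_video_decoder_add_to_frame (decoder, 4 + payload_size);
  return gst_base_video_decoder_have_frame (decoder);
}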
Example #7
static GstFlowReturn
gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf)
{
  GstBaseVideoDecoder *base_video_decoder;
  GstBaseVideoDecoderClass *klass;
  GstFlowReturn ret;

  GST_DEBUG ("chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

#if 0
  /* requiring the pad to be negotiated makes it impossible to use
   * oggdemux or filesrc ! decoder */
  if (!gst_pad_is_negotiated (pad)) {
    GST_DEBUG ("not negotiated");
    return GST_FLOW_NOT_NEGOTIATED;
  }
#endif

  base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));
  klass = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  GST_DEBUG_OBJECT (base_video_decoder, "chain");

  if (!base_video_decoder->have_segment) {
    GstEvent *event;

    GST_WARNING
        ("Received buffer without a new-segment. Assuming timestamps start from 0.");

    gst_segment_set_newsegment_full (&base_video_decoder->segment,
        FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0);
    base_video_decoder->have_segment = TRUE;

    event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0,
        GST_CLOCK_TIME_NONE, 0);

    if (!gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder),
            event)) {
      GST_ERROR ("failed to push new segment event");
      gst_object_unref (base_video_decoder);
      return GST_FLOW_ERROR;
    }
  }

  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) {
    GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer");
    gst_base_video_decoder_reset (base_video_decoder);
  }

  if (!base_video_decoder->started) {
    if (klass->start)
      klass->start (base_video_decoder);
    base_video_decoder->started = TRUE;
  }

  if (base_video_decoder->current_frame == NULL) {
    base_video_decoder->current_frame =
        gst_base_video_decoder_new_frame (base_video_decoder);
  }

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    gst_base_video_decoder_add_timestamp (base_video_decoder, buf);
  }
  base_video_decoder->input_offset += GST_BUFFER_SIZE (buf);

#if 0
  if (base_video_decoder->timestamp_offset == GST_CLOCK_TIME_NONE &&
      GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) {
    GST_DEBUG ("got new offset %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
    base_video_decoder->timestamp_offset = GST_BUFFER_TIMESTAMP (buf);
  }
#endif

  if (base_video_decoder->packetized) {
    base_video_decoder->current_frame->sink_buffer = buf;

    ret = gst_base_video_decoder_have_frame_2 (base_video_decoder);
  } else {

    gst_adapter_push (base_video_decoder->input_adapter, buf);

    if (!base_video_decoder->have_sync) {
      int n, m;

      GST_DEBUG ("no sync, scanning");

      n = gst_adapter_available (base_video_decoder->input_adapter);
      m = klass->scan_for_sync (base_video_decoder, FALSE, 0, n);
      if (m == -1) {
        gst_object_unref (base_video_decoder);
        return GST_FLOW_OK;
      }

      if (m < 0) {
        g_warning ("subclass returned negative scan %d", m);
        m = 0;
      }

      if (m >= n) {
        GST_ERROR ("subclass scanned past end %d >= %d", m, n);
        m = n;
      }

      gst_adapter_flush (base_video_decoder->input_adapter, m);

      if (m < n) {
        GST_DEBUG ("found possible sync after %d bytes (of %d)", m, n);

        /* this is only "maybe" sync */
        base_video_decoder->have_sync = TRUE;
      }

      if (!base_video_decoder->have_sync) {
        gst_object_unref (base_video_decoder);
        return GST_FLOW_OK;
      }
    }

    do {
      ret = klass->parse_data (base_video_decoder, FALSE);
    } while (ret == GST_FLOW_OK);

    if (ret == GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA) {
      gst_object_unref (base_video_decoder);
      return GST_FLOW_OK;
    }
  }

  gst_object_unref (base_video_decoder);
  return ret;
}
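
This revision also pins down the scan_for_sync contract seen above: returning -1 means "no sync point found yet" (everything stays buffered and the element simply waits for more data), while any m in [0, n) is the number of junk bytes to flush before a possible sync point. A sketch against an invented one-byte start code (GstFooDec naming is again hypothetical):

static int
gst_foo_dec_scan_for_sync (GstBaseVideoDecoder * decoder, gboolean at_eos,
    int offset, int n)
{
  guint8 byte;
  int i;

  /* Byte-at-a-time for clarity; a real scanner would peek a larger window.
   * 0xB3 is an invented start-code value. */
  for (i = offset; i < n; i++) {
    gst_adapter_copy (decoder->input_adapter, &byte, i, 1);
    if (byte == 0xB3)
      return i;                 /* flush i bytes, then "maybe" synced */
  }

  return -1;                    /* nothing yet; caller waits for more data */
}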
Example #8
static gboolean
gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event)
{
  GstBaseVideoDecoder *base_video_decoder;
  GstBaseVideoDecoderClass *base_video_decoder_class;
  gboolean ret = FALSE;

  base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));
  base_video_decoder_class =
      GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_EOS:
    {
      if (!base_video_decoder->packetized) {
        GstFlowReturn flow_ret;

        do {
          flow_ret =
              base_video_decoder_class->parse_data (base_video_decoder, TRUE);
        } while (flow_ret == GST_FLOW_OK);
      }

      if (base_video_decoder_class->finish) {
        base_video_decoder_class->finish (base_video_decoder);
      }

      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder),
          event);
    }
      break;
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      double rate;
      double applied_rate;
      GstFormat format;
      gint64 start;
      gint64 stop;
      gint64 position;
      GstSegment *segment = &base_video_decoder->segment;

      gst_event_parse_new_segment_full (event, &update, &rate,
          &applied_rate, &format, &start, &stop, &position);

      if (format != GST_FORMAT_TIME)
        goto newseg_wrong_format;

      if (!update) {
        gst_base_video_decoder_reset (base_video_decoder);
      }

      base_video_decoder->timestamp_offset = start;

      gst_segment_set_newsegment_full (segment,
          update, rate, applied_rate, format, start, stop, position);
      base_video_decoder->have_segment = TRUE;

      GST_WARNING ("new segment: format %d rate %g start %" GST_TIME_FORMAT
          " stop %" GST_TIME_FORMAT
          " position %" GST_TIME_FORMAT
          " update %d",
          format, rate,
          GST_TIME_ARGS (segment->start),
          GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time), update);

      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder),
          event);
    }
      break;
    case GST_EVENT_FLUSH_STOP:{
      GST_OBJECT_LOCK (base_video_decoder);
      base_video_decoder->earliest_time = GST_CLOCK_TIME_NONE;
      base_video_decoder->proportion = 0.5;
      GST_OBJECT_UNLOCK (base_video_decoder);
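      /* no break: fall through so the FLUSH_STOP event itself is still
       * forwarded downstream by the default case */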
    }
    default:
      /* FIXME this changes the order of events */
      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder),
          event);
      break;
  }

done:
  gst_object_unref (base_video_decoder);
  return ret;

newseg_wrong_format:
  {
    GST_DEBUG_OBJECT (base_video_decoder, "received non TIME newsegment");
    gst_event_unref (event);
    goto done;
  }
}