static gboolean
sync_pad_values (GstAudioAggregator * aagg, GstAudioAggregatorPad * pad)
{
  GstAggregatorPad *bpad = GST_AGGREGATOR_PAD (pad);
  GstClockTime timestamp, stream_time;

  if (pad->priv->buffer == NULL)
    return TRUE;

  timestamp = GST_BUFFER_PTS (pad->priv->buffer);
  GST_OBJECT_LOCK (bpad);
  stream_time = gst_segment_to_stream_time (&bpad->segment, GST_FORMAT_TIME,
      timestamp);
  GST_OBJECT_UNLOCK (bpad);

  /* sync object properties on stream time */
  /* TODO: Ideally we would want to do that on every sample */
  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (pad), stream_time);

  return TRUE;
}
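The controller idiom above recurs throughout the examples below: convert the
buffer PTS to stream time with gst_segment_to_stream_time() and, if the result
is valid, pass it to gst_object_sync_values() so that controlled properties
are updated. A minimal free-standing sketch of that pattern (the element and
segment parameters are assumptions for illustration, not part of the code
above):

/* Sync controlled properties to the stream time of a buffer. */
static void
sync_properties_on_buffer (GstElement * element, const GstSegment * segment,
    GstBuffer * buffer)
{
  GstClockTime stream_time = gst_segment_to_stream_time (segment,
      GST_FORMAT_TIME, GST_BUFFER_PTS (buffer));

  /* an invalid stream time means the PTS falls outside the segment */
  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (element), stream_time);
}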
Example 2
/* Actual processing. */
static GstFlowReturn
gst_burn_transform_frame (GstVideoFilter * vfilter,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  GstBurn *filter = GST_BURN (vfilter);
  gint video_size, adjustment, width, height;
  guint32 *src, *dest;
  GstClockTime timestamp;
  gint64 stream_time;

  src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
  dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

  width = GST_VIDEO_FRAME_WIDTH (in_frame);
  height = GST_VIDEO_FRAME_HEIGHT (in_frame);

  video_size = width * height;

  /* GstController: update the properties */
  timestamp = GST_BUFFER_TIMESTAMP (in_frame->buffer);
  stream_time =
      gst_segment_to_stream_time (&GST_BASE_TRANSFORM (filter)->segment,
      GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (filter), stream_time);

  GST_OBJECT_LOCK (filter);
  adjustment = filter->adjustment;
  GST_OBJECT_UNLOCK (filter);

  /*** Now the image processing work.... ***/
  orc_gaudi_burn (dest, src, adjustment, video_size);

  return GST_FLOW_OK;
}
/**
 * gst_segment_set_running_time:
 * @segment: a #GstSegment structure.
 * @format: the format of the segment.
 * @running_time: the running_time in the segment
 *
 * Adjust the start/stop and accum values of @segment such that the next valid
 * buffer will be one with @running_time.
 *
 * Returns: %TRUE if the segment could be updated successfully. If %FALSE is
 * returned, @running_time is -1 or not in @segment.
 *
 * Since: 0.10.24
 */
gboolean
gst_segment_set_running_time (GstSegment * segment, GstFormat format,
    gint64 running_time)
{
  gint64 position;
  gint64 start, stop, last_stop;

  /* start by bringing the running_time into the segment position */
  position = gst_segment_to_position (segment, format, running_time);

  /* we must have a valid position now */
  if (G_UNLIKELY (position == -1))
    return FALSE;

  start = segment->start;
  stop = segment->stop;
  last_stop = segment->last_stop;

  if (G_LIKELY (segment->rate > 0.0)) {
    /* update the start/last_stop and time values */
    start = position;
    if (last_stop < start)
      last_stop = start;
  } else {
    /* reverse, update stop */
    stop = position;
    /* if we were past the position, go back */
    if (last_stop > stop)
      last_stop = stop;
  }
  /* and accumulated time is exactly the running time */
  segment->time = gst_segment_to_stream_time (segment, format, start);
  segment->start = start;
  segment->stop = stop;
  segment->last_stop = last_stop;
  segment->accum = running_time;

  return TRUE;
}
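A minimal usage sketch against the 0.10 API documented above (the segment
values are invented for illustration): configure a 10-second segment, then
shift it so that the next buffer corresponds to 5 seconds of running time.

GstSegment segment;

gst_segment_init (&segment, GST_FORMAT_TIME);
gst_segment_set_newsegment (&segment, FALSE, 1.0, GST_FORMAT_TIME,
    0, 10 * GST_SECOND, 0);

if (gst_segment_set_running_time (&segment, GST_FORMAT_TIME, 5 * GST_SECOND))
  g_print ("segment now starts at %" GST_TIME_FORMAT "\n",
      GST_TIME_ARGS (segment.start));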
static GstFlowReturn
output_achain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstClockTime timestamp;
  guint8 b;
  gboolean audio_jitter = audio_nondiscont || audio_drift || early_video;

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  if (!audio_jitter)
    fail_unless_equals_int64 (timestamp,
        (audio_buffer_count % n_abuffers) * 1 * GST_SECOND);
  timestamp =
      gst_segment_to_stream_time (&current_audio_segment, GST_FORMAT_TIME,
      timestamp);
  if (!audio_jitter)
    fail_unless_equals_int64 (timestamp,
        (audio_buffer_count % n_abuffers) * 1 * GST_SECOND);

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  timestamp =
      gst_segment_to_running_time (&current_audio_segment, GST_FORMAT_TIME,
      timestamp);
  if (!audio_jitter)
    fail_unless_equals_int64 (timestamp, audio_buffer_count * 1 * GST_SECOND);

  gst_buffer_extract (buffer, 0, &b, 1);

  if (per_channel) {
    fail_unless_equals_int (b, fill_value_per_channel[0]);
  } else {
    fail_unless_equals_int (b, fill_value);
  }

  audio_buffer_count++;
  gst_buffer_unref (buffer);
  return GST_FLOW_OK;
}
Example 5
static void
gst_multi_file_sink_post_message (GstMultiFileSink * multifilesink,
                                  GstBuffer * buffer, const char *filename)
{
    if (multifilesink->post_messages) {
        GstClockTime duration, timestamp;
        GstClockTime running_time, stream_time;
        guint64 offset, offset_end;
        GstStructure *s;
        GstSegment *segment;
        GstFormat format;

        segment = &GST_BASE_SINK (multifilesink)->segment;
        format = segment->format;

        timestamp = GST_BUFFER_TIMESTAMP (buffer);
        duration = GST_BUFFER_DURATION (buffer);
        offset = GST_BUFFER_OFFSET (buffer);
        offset_end = GST_BUFFER_OFFSET_END (buffer);

        running_time = gst_segment_to_running_time (segment, format, timestamp);
        stream_time = gst_segment_to_stream_time (segment, format, timestamp);

        s = gst_structure_new ("GstMultiFileSink",
                               "filename", G_TYPE_STRING, filename,
                               "index", G_TYPE_INT, multifilesink->index,
                               "timestamp", G_TYPE_UINT64, timestamp,
                               "stream-time", G_TYPE_UINT64, stream_time,
                               "running-time", G_TYPE_UINT64, running_time,
                               "duration", G_TYPE_UINT64, duration,
                               "offset", G_TYPE_UINT64, offset,
                               "offset-end", G_TYPE_UINT64, offset_end, NULL);

        gst_element_post_message (GST_ELEMENT_CAST (multifilesink),
                                  gst_message_new_element (GST_OBJECT_CAST (multifilesink), s));
    }
}
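The element message built above is delivered on the pipeline bus. A minimal
sketch of an application-side consumer (the callback name and its attachment
via gst_bus_add_watch() are assumptions; the field names match the structure
created in the function):

static gboolean
on_bus_message (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
    const GstStructure *s = gst_message_get_structure (msg);

    if (s != NULL && gst_structure_has_name (s, "GstMultiFileSink")) {
      guint64 stream_time;
      const gchar *filename = gst_structure_get_string (s, "filename");

      if (gst_structure_get_uint64 (s, "stream-time", &stream_time))
        g_print ("wrote %s at stream time %" GST_TIME_FORMAT "\n",
            filename, GST_TIME_ARGS (stream_time));
    }
  }
  return TRUE;                  /* keep the watch installed */
}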
Example 6
static GstMessage *
gst_level_message_new (GstLevel * level, GstClockTime timestamp,
    GstClockTime duration)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM_CAST (level);
  GstStructure *s;
  GValue v = { 0, };
  GstClockTime endtime, running_time, stream_time;

  running_time = gst_segment_to_running_time (&trans->segment, GST_FORMAT_TIME,
      timestamp);
  stream_time = gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME,
      timestamp);
  /* endtime is for backwards compatibility */
  endtime = stream_time + duration;

  s = gst_structure_new ("level",
      "endtime", GST_TYPE_CLOCK_TIME, endtime,
      "timestamp", G_TYPE_UINT64, timestamp,
      "stream-time", G_TYPE_UINT64, stream_time,
      "running-time", G_TYPE_UINT64, running_time,
      "duration", G_TYPE_UINT64, duration, NULL);

  g_value_init (&v, G_TYPE_VALUE_ARRAY);
  g_value_take_boxed (&v, g_value_array_new (0));
  gst_structure_take_value (s, "rms", &v);

  g_value_init (&v, G_TYPE_VALUE_ARRAY);
  g_value_take_boxed (&v, g_value_array_new (0));
  gst_structure_take_value (s, "peak", &v);

  g_value_init (&v, G_TYPE_VALUE_ARRAY);
  g_value_take_boxed (&v, g_value_array_new (0));
  gst_structure_take_value (s, "decay", &v);

  return gst_message_new_element (GST_OBJECT (level), s);
}
/* this function does the actual processing
 */
static GstFlowReturn
gst_audio_panorama_transform (GstBaseTransform * base, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstAudioPanorama *filter = GST_AUDIO_PANORAMA (base);
  GstClockTime timestamp, stream_time;
  GstMapInfo inmap, outmap;

  timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  stream_time =
      gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (filter), stream_time);

  gst_buffer_map (inbuf, &inmap, GST_MAP_READ);
  gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);

  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_GAP))) {
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
    memset (outmap.data, 0, outmap.size);
  } else {
    /* output always stereo, input mono or stereo,
     * and info describes input format */
    guint num_samples = outmap.size / (2 * GST_AUDIO_INFO_BPS (&filter->info));

    filter->process (filter, inmap.data, outmap.data, num_samples);
  }

  gst_buffer_unmap (inbuf, &inmap);
  gst_buffer_unmap (outbuf, &outmap);

  return GST_FLOW_OK;
}
/* GstBaseTransform vmethod implementations */
static GstFlowReturn
gst_audio_fx_base_iir_filter_transform_ip (GstBaseTransform * base,
    GstBuffer * buf)
{
  GstAudioFXBaseIIRFilter *filter = GST_AUDIO_FX_BASE_IIR_FILTER (base);
  guint num_samples;
  GstClockTime timestamp, stream_time;
  GstMapInfo map;

  timestamp = GST_BUFFER_TIMESTAMP (buf);
  stream_time =
      gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (filter), stream_time);

  gst_buffer_map (buf, &map, GST_MAP_READWRITE);
  num_samples = map.size / GST_AUDIO_FILTER_BPS (filter);

  g_mutex_lock (&filter->lock);
  if (filter->a == NULL || filter->b == NULL) {
    g_warn_if_fail (filter->a != NULL && filter->b != NULL);
    gst_buffer_unmap (buf, &map);
    g_mutex_unlock (&filter->lock);
    return GST_FLOW_ERROR;
  }
  filter->process (filter, map.data, num_samples);
  g_mutex_unlock (&filter->lock);

  gst_buffer_unmap (buf, &map);

  return GST_FLOW_OK;
}
static void
gst_multi_file_sink_post_message_from_time (GstMultiFileSink * multifilesink,
    GstClockTime timestamp, GstClockTime duration, const char *filename)
{
  GstClockTime running_time, stream_time;
  guint64 offset, offset_end;
  GstSegment *segment;
  GstFormat format;

  if (!multifilesink->post_messages)
    return;

  segment = &GST_BASE_SINK (multifilesink)->segment;
  format = segment->format;

  offset = -1;
  offset_end = -1;

  running_time = gst_segment_to_running_time (segment, format, timestamp);
  stream_time = gst_segment_to_stream_time (segment, format, timestamp);

  gst_multi_file_sink_post_message_full (multifilesink, timestamp, duration,
      offset, offset_end, running_time, stream_time, filename);
}
Example 10
static gboolean
gst_timidity_src_query (GstPad * pad, GstQuery * query)
{
  gboolean res = TRUE;
  GstTimidity *timidity = GST_TIMIDITY (gst_pad_get_parent (pad));
  GstFormat src_format, dst_format;
  gint64 src_value, dst_value;

  if (!timidity->song) {
    gst_object_unref (timidity);
    return FALSE;
  }

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_DURATION:
      gst_query_set_duration (query, GST_FORMAT_TIME,
          GST_MSECOND * (gint64) mid_song_get_total_time (timidity->song));
      break;
    case GST_QUERY_POSITION:
      gst_query_set_position (query, GST_FORMAT_TIME,
          timidity->o_segment->last_stop * timidity->time_per_frame);
      break;
    case GST_QUERY_CONVERT:
      gst_query_parse_convert (query, &src_format, &src_value,
          &dst_format, NULL);

      res =
          gst_timidity_src_convert (timidity, src_format, src_value,
          &dst_format, &dst_value);
      if (res)
        gst_query_set_convert (query, src_format, src_value, dst_format,
            dst_value);

      break;
    case GST_QUERY_FORMATS:
      gst_query_set_formats (query, 3,
          GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
      break;
    case GST_QUERY_SEGMENT:{
      GstFormat format;
      gint64 start, stop;

      format = timidity->o_segment->format;

      start =
          gst_segment_to_stream_time (timidity->o_segment, format,
          timidity->o_segment->start);
      if ((stop = timidity->o_segment->stop) == -1)
        stop = timidity->o_segment->duration;
      else
        stop = gst_segment_to_stream_time (timidity->o_segment, format, stop);

      gst_query_set_segment (query, timidity->o_segment->rate, format, start,
          stop);
      res = TRUE;
      break;
    }
    case GST_QUERY_SEEKING:
      gst_query_set_seeking (query, timidity->o_segment->format,
          TRUE, 0, timidity->o_len);
      break;
    default:
      res = FALSE;
      break;
  }

  gst_object_unref (timidity);
  return res;
}
Example 11
static gboolean
gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query)
{
  GstBaseVideoDecoder *dec;
  gboolean res = TRUE;

  dec = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 time;

      gst_query_parse_position (query, &format, NULL);
      GST_DEBUG ("query in format %d", format);

      if (format != GST_FORMAT_TIME) {
        goto error;
      }

      time = dec->last_timestamp;
      time = gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, time);

      gst_query_set_position (query, format, time);

      res = TRUE;

      break;
    }
    case GST_QUERY_DURATION:
    {
      res = gst_pad_peer_query (dec->sinkpad, query);
      break;
    }
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      GST_DEBUG ("convert query");

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      res =
          gst_base_video_decoder_src_convert (pad, src_fmt, src_val, &dest_fmt,
          &dest_val);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
  }
  gst_object_unref (dec);
  return res;

error:
  GST_ERROR_OBJECT (dec, "query failed");
  gst_object_unref (dec);
  return res;
}
Example 12
GstFlowReturn
gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
    GstVideoFrame * frame)
{
  GstFlowReturn ret;
  GstBaseVideoEncoderClass *base_video_encoder_class;

  base_video_encoder_class =
      GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);

  frame->system_frame_number =
      GST_BASE_VIDEO_CODEC (base_video_encoder)->system_frame_number;
  GST_BASE_VIDEO_CODEC (base_video_encoder)->system_frame_number++;

  if (frame->is_sync_point) {
    base_video_encoder->distance_from_sync = 0;
    GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  } else {
    GST_BUFFER_FLAG_SET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }

  frame->distance_from_sync = base_video_encoder->distance_from_sync;
  base_video_encoder->distance_from_sync++;

  frame->decode_frame_number = frame->system_frame_number - 1;
  if (frame->decode_frame_number < 0) {
    frame->decode_timestamp = 0;
  } else {
    frame->decode_timestamp = gst_util_uint64_scale (frame->decode_frame_number,
        GST_SECOND * GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_d,
        GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_n);
  }

  GST_BUFFER_TIMESTAMP (frame->src_buffer) = frame->presentation_timestamp;
  GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration;
  GST_BUFFER_OFFSET (frame->src_buffer) = frame->decode_timestamp;

  GST_BASE_VIDEO_CODEC (base_video_encoder)->frames =
      g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame);

  if (!base_video_encoder->set_output_caps) {
    if (base_video_encoder_class->get_caps) {
      GST_BASE_VIDEO_CODEC (base_video_encoder)->caps =
          base_video_encoder_class->get_caps (base_video_encoder);
    } else {
      GST_BASE_VIDEO_CODEC (base_video_encoder)->caps =
          gst_caps_new_simple ("video/unknown", NULL);
    }
    gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        GST_BASE_VIDEO_CODEC (base_video_encoder)->caps);
    base_video_encoder->set_output_caps = TRUE;
  }

  gst_buffer_set_caps (GST_BUFFER (frame->src_buffer),
      GST_BASE_VIDEO_CODEC (base_video_encoder)->caps);

  if (frame->force_keyframe) {
    GstClockTime stream_time;
    GstClockTime running_time;
    GstStructure *s;

    running_time =
        gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);
    stream_time =
        gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);

    /* FIXME this should send the event that we got on the sink pad
       instead of creating a new one */
    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, frame->presentation_timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  if (base_video_encoder_class->shape_output) {
    ret = base_video_encoder_class->shape_output (base_video_encoder, frame);
  } else {
    ret =
        gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        frame->src_buffer);
  }

  gst_base_video_codec_free_frame (frame);

  return ret;
}
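As the FIXME above notes, the "GstForceKeyUnit" structure normally originates
from an event rather than being rebuilt. A minimal sketch of how an
application could request a keyframe from an encoder with a custom upstream
event (the encoder variable is an assumption; gst_event_new_custom() takes
ownership of the structure):

GstStructure *s = gst_structure_new ("GstForceKeyUnit",
    "all-headers", G_TYPE_BOOLEAN, TRUE, NULL);

gst_element_send_event (encoder,
    gst_event_new_custom (GST_EVENT_CUSTOM_UPSTREAM, s));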
static gboolean
probe_cb (InsanityGstTest * ptest, GstPad * pad, GstMiniObject * object,
    gpointer userdata)
{
  InsanityTest *test = INSANITY_TEST (ptest);

  global_last_probe = g_get_monotonic_time ();

  if (GST_IS_BUFFER (object)) {
    GstClockTime buf_start, buf_end;
    GstBuffer *next_sub, *buf = GST_BUFFER (object);

    buf_start =
        gst_segment_to_stream_time (&glob_suboverlay_src_probe->last_segment,
        glob_suboverlay_src_probe->last_segment.format, GST_BUFFER_PTS (buf));
    buf_end = buf_start + GST_BUFFER_DURATION (buf);

    if (glob_in_progress == TEST_SUBTTILE_DESCRIPTOR_GENERATION) {
      if (glob_pipeline_restarted == TRUE) {
        gboolean has_subs;

        if (glob_duration > 0 && buf_end > glob_duration) {
          /* Done according to the duration previously found by the
           * discoverer */
          next_test (test);
        }

        has_subs = frame_contains_subtitles (buf);
        if (GST_CLOCK_TIME_IS_VALID (glob_last_subtitled_frame)) {
          if (has_subs == FALSE) {
            GstBuffer *nbuf = gst_buffer_new ();

            GST_BUFFER_PTS (nbuf) = glob_last_subtitled_frame;
            GST_BUFFER_DURATION (nbuf) = buf_end - glob_last_subtitled_frame;
            media_descriptor_writer_add_frame (glob_writer, pad, nbuf);

            glob_last_subtitled_frame = GST_CLOCK_TIME_NONE;
            gst_buffer_unref (nbuf);
          }
        } else if (has_subs) {
          glob_last_subtitled_frame = buf_start;
        }
      }

      goto done;
    }

    /* We played enough... next test */
    if (GST_CLOCK_TIME_IS_VALID (glob_first_subtitle_ts) &&
        buf_start >=
        glob_first_subtitle_ts + glob_playback_duration * GST_SECOND) {
      next_test (test);
    }

    switch (glob_in_progress) {
      case TEST_NONE:
      {

        if (glob_suboverlay_src_probe->waiting_first_segment == TRUE) {
          insanity_test_validate_checklist_item (test, "first-segment", FALSE,
              "Got a buffer before the first segment");
        }
        next_test (test);
      }
      default:
        break;
    }

    if (glob_subtitled_frames != NULL) {
      GstClockTime sub_start, sub_end;

      next_sub = GST_BUFFER (glob_subtitled_frames->data);

      sub_start = GST_BUFFER_PTS (next_sub);
      sub_end = GST_BUFFER_DURATION_IS_VALID (next_sub) ?
          GST_BUFFER_DURATION (next_sub) + sub_start : -1;

      if (buf_start >= sub_start && buf_end < sub_end) {
        if (frame_contains_subtitles (buf) == TRUE) {
          glob_sub_render_found = TRUE;
          insanity_test_validate_checklist_item (test, "subtitle-rendered",
              TRUE, NULL);
        } else {
          gchar *msg = g_strdup_printf ("Subtitle start %" GST_TIME_FORMAT
              " end %" GST_TIME_FORMAT " received buffer with no sub start %"
              GST_TIME_FORMAT " end %" GST_TIME_FORMAT,
              GST_TIME_ARGS (sub_start),
              GST_TIME_ARGS (sub_end), GST_TIME_ARGS (buf_start),
              GST_TIME_ARGS (buf_end));

          insanity_test_validate_checklist_item (test, "subtitle-rendered",
              FALSE, msg);
          glob_wrong_rendered_buf = TRUE;

          g_free (msg);
        }
      } else if (buf_end > sub_end) {
        /* We got a buffer that is after the subtitle we were waiting for
         * remove that buffer as not waiting for it anymore */
        gst_buffer_unref (next_sub);

        glob_subtitled_frames = g_list_remove (glob_subtitled_frames, next_sub);
      }
    }

  } else if (GST_IS_EVENT (object)) {
    GstEvent *event = GST_EVENT (object);

    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_SEGMENT:
      {
        gst_event_copy_segment (event,
            &glob_suboverlay_src_probe->last_segment);

        if (glob_suboverlay_src_probe->waiting_first_segment == TRUE) {
          insanity_test_validate_checklist_item (test, "first-segment", TRUE,
              NULL);
          glob_suboverlay_src_probe->waiting_first_segment = FALSE;
        }

        if (glob_suboverlay_src_probe->waiting_segment == FALSE)
          /* Cache the segment as it will be our reference but don't look
           * further */
          goto done;

        if (glob_suboverlay_src_probe->waiting_first_segment == TRUE) {
          /* Make sure that a new segment has been received for each stream */
          glob_suboverlay_src_probe->waiting_first_segment = FALSE;
          glob_suboverlay_src_probe->waiting_segment = FALSE;
        }

        glob_suboverlay_src_probe->waiting_segment = FALSE;
        break;
      }
      default:
        break;
    }
  }

done:
  return TRUE;
}
/* Pipeline Callbacks */
static gboolean
renderer_probe_cb (InsanityGstTest * ptest, GstPad * pad,
    GstMiniObject * object, gpointer userdata)
{
  InsanityTest *test = INSANITY_TEST (ptest);

  if (GST_IS_BUFFER (object)) {
    gint64 stime_ts;
    GstBuffer *buf = GST_BUFFER (object), *nbuf;

    if (glob_in_progress == TEST_SUBTTILE_DESCRIPTOR_GENERATION)
      goto done;

    if (GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DURATION (buf)) == FALSE
        && glob_parser == NULL) {
      gboolean generate_media_desc;

      insanity_test_get_boolean_argument (test, "create-media-descriptor",
          (gboolean *) & generate_media_desc);

      /* We generate the XML file if needed and allowed by user */
      if (generate_media_desc)
        generate_xml_media_descriptor (test);
      else
        insanity_test_done (test);
    } else if (glob_parser == NULL) {

      /* Avoid using xml descriptor when not needed */
      stime_ts =
          gst_segment_to_stream_time (&glob_renderer_sink_probe->last_segment,
          glob_renderer_sink_probe->last_segment.format, GST_BUFFER_PTS (buf));

      if (GST_CLOCK_TIME_IS_VALID (glob_first_subtitle_ts) == FALSE)
        glob_first_subtitle_ts = stime_ts;

      nbuf = gst_buffer_new ();
      GST_BUFFER_PTS (nbuf) = stime_ts;
      GST_BUFFER_DURATION (nbuf) = GST_BUFFER_DURATION (buf);

      glob_subtitled_frames = g_list_insert_sorted (glob_subtitled_frames, nbuf,
          (GCompareFunc) sort_subtitle_bufs);
    }
  } else if (GST_IS_EVENT (object)) {
    GstEvent *event = GST_EVENT (object);

    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_SEGMENT:
      {
        /* We do not care about event during subtitle generation */
        if (glob_in_progress == TEST_SUBTTILE_DESCRIPTOR_GENERATION)
          goto done;

        gst_event_copy_segment (event, &glob_renderer_sink_probe->last_segment);

        if (glob_renderer_sink_probe->waiting_segment == FALSE)
          /* Cache the segment as it will be our reference but don't look
           * further */
          goto done;

        if (glob_renderer_sink_probe->waiting_first_segment == TRUE) {
          /* Make sure that a new segment has been received for each stream */
          glob_renderer_sink_probe->waiting_first_segment = FALSE;
          glob_renderer_sink_probe->waiting_segment = FALSE;
        }

        glob_renderer_sink_probe->waiting_segment = FALSE;
        break;
      }
      default:
        break;
    }
  }

done:
  return TRUE;
}
static GstFlowReturn
gst_gdk_pixbuf_sink_handle_buffer (GstBaseSink * basesink, GstBuffer * buf,
    const gchar * msg_name)
{
  GstGdkPixbufSink *sink;
  GdkPixbuf *pixbuf;
  gboolean do_post;

  sink = GST_GDK_PIXBUF_SINK (basesink);

  pixbuf = gst_gdk_pixbuf_sink_get_pixbuf_from_buffer (sink, buf);

  GST_OBJECT_LOCK (sink);

  do_post = sink->post_messages;

  if (sink->last_pixbuf)
    g_object_unref (sink->last_pixbuf);

  sink->last_pixbuf = pixbuf;   /* take ownership */

  GST_OBJECT_UNLOCK (sink);

  if (G_UNLIKELY (pixbuf == NULL))
    goto error;

  if (do_post) {
    GstStructure *s;
    GstMessage *msg;
    GstFormat format;
    GstClockTime timestamp;
    GstClockTime running_time, stream_time;

    GstSegment *segment = &basesink->segment;
    format = segment->format;

    timestamp = GST_BUFFER_PTS (buf);
    running_time = gst_segment_to_running_time (segment, format, timestamp);
    stream_time = gst_segment_to_stream_time (segment, format, timestamp);

    /* it's okay to keep using pixbuf here, we can be sure no one is going to
     * unref or change sink->last_pixbuf before we return from this function.
     * The structure will take its own ref to the pixbuf. */
    s = gst_structure_new (msg_name,
        "pixbuf", GDK_TYPE_PIXBUF, pixbuf,
        "pixel-aspect-ratio", GST_TYPE_FRACTION, sink->par_n, sink->par_d,
        "timestamp", G_TYPE_UINT64, timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    msg = gst_message_new_element (GST_OBJECT_CAST (sink), s);
    gst_element_post_message (GST_ELEMENT_CAST (sink), msg);
  }

  g_object_notify (G_OBJECT (sink), "last-pixbuf");

  return GST_FLOW_OK;

/* ERRORS */
error:
  {
    /* This shouldn't really happen */
    GST_ELEMENT_ERROR (sink, LIBRARY, FAILED,
        ("Couldn't create pixbuf from RGB image."),
        ("Probably not enough free memory"));
    return GST_FLOW_ERROR;
  }
}
Example 16
static GstFlowReturn
gst_shape_wipe_video_sink_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buffer)
{
  GstShapeWipe *self = GST_SHAPE_WIPE (parent);
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *mask = NULL, *outbuf = NULL;
  GstClockTime timestamp;
  gboolean new_outbuf = FALSE;
  GstVideoFrame inframe, outframe, maskframe;

  if (G_UNLIKELY (GST_VIDEO_INFO_FORMAT (&self->vinfo) ==
          GST_VIDEO_FORMAT_UNKNOWN))
    goto not_negotiated;

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  timestamp =
      gst_segment_to_stream_time (&self->segment, GST_FORMAT_TIME, timestamp);

  if (GST_CLOCK_TIME_IS_VALID (timestamp))
    gst_object_sync_values (GST_OBJECT (self), timestamp);

  GST_LOG_OBJECT (self,
      "Blending buffer with timestamp %" GST_TIME_FORMAT " at position %f",
      GST_TIME_ARGS (timestamp), self->mask_position);

  g_mutex_lock (&self->mask_mutex);
  if (self->shutdown)
    goto shutdown;

  if (!self->mask)
    g_cond_wait (&self->mask_cond, &self->mask_mutex);

  if (self->mask == NULL || self->shutdown) {
    goto shutdown;
  } else {
    mask = gst_buffer_ref (self->mask);
  }
  g_mutex_unlock (&self->mask_mutex);

  if (!gst_shape_wipe_do_qos (self, GST_BUFFER_TIMESTAMP (buffer)))
    goto qos;

  /* Try to blend inplace, if it's not possible
   * get a new buffer from downstream. */
  if (!gst_buffer_is_writable (buffer)) {
    outbuf = gst_buffer_new_allocate (NULL, gst_buffer_get_size (buffer), NULL);
    gst_buffer_copy_into (outbuf, buffer, GST_BUFFER_COPY_METADATA, 0, -1);
    new_outbuf = TRUE;
  } else {
    outbuf = buffer;
  }

  gst_video_frame_map (&inframe, &self->vinfo, buffer,
      new_outbuf ? GST_MAP_READ : GST_MAP_READWRITE);
  gst_video_frame_map (&outframe, &self->vinfo, outbuf,
      new_outbuf ? GST_MAP_WRITE : GST_MAP_READWRITE);

  gst_video_frame_map (&maskframe, &self->minfo, mask, GST_MAP_READ);

  switch (GST_VIDEO_INFO_FORMAT (&self->vinfo)) {
    case GST_VIDEO_FORMAT_AYUV:
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_ABGR:
      if (self->mask_bpp == 16)
        gst_shape_wipe_blend_argb_16 (self, &inframe, &maskframe, &outframe);
      else
        gst_shape_wipe_blend_argb_8 (self, &inframe, &maskframe, &outframe);
      break;
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_RGBA:
      if (self->mask_bpp == 16)
        gst_shape_wipe_blend_bgra_16 (self, &inframe, &maskframe, &outframe);
      else
        gst_shape_wipe_blend_bgra_8 (self, &inframe, &maskframe, &outframe);
      break;
    default:
      g_assert_not_reached ();
      break;
  }

  gst_video_frame_unmap (&outframe);
  gst_video_frame_unmap (&inframe);

  gst_video_frame_unmap (&maskframe);

  gst_buffer_unref (mask);
  if (new_outbuf)
    gst_buffer_unref (buffer);

  ret = gst_pad_push (self->srcpad, outbuf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto push_failed;

  return ret;

  /* Errors */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "No valid caps yet");
    gst_buffer_unref (buffer);
    return GST_FLOW_NOT_NEGOTIATED;
  }
shutdown:
  {
    GST_DEBUG_OBJECT (self, "Shutting down");
    /* both jumps to this label happen with the mask mutex still held */
    g_mutex_unlock (&self->mask_mutex);
    gst_buffer_unref (buffer);
    return GST_FLOW_FLUSHING;
  }
qos:
  {
    GST_DEBUG_OBJECT (self, "Dropping buffer because of QoS");
    gst_buffer_unref (buffer);
    gst_buffer_unref (mask);
    return GST_FLOW_OK;
  }
push_failed:
  {
    GST_ERROR_OBJECT (self, "Pushing buffer downstream failed: %s",
        gst_flow_get_name (ret));
    return ret;
  }
}
static GstFlowReturn
gst_timecodestamper_transform_ip (GstBaseTransform * vfilter,
    GstBuffer * buffer)
{
  GstTimeCodeStamper *timecodestamper = GST_TIME_CODE_STAMPER (vfilter);
  GstClockTime ref_time;

  GST_OBJECT_LOCK (timecodestamper);
  if (gst_buffer_get_video_time_code_meta (buffer)
      && !timecodestamper->override_existing) {
    GST_OBJECT_UNLOCK (timecodestamper);
    return GST_FLOW_OK;
  } else if (timecodestamper->override_existing) {
    gst_buffer_foreach_meta (buffer, remove_timecode_meta, NULL);
  }

  if (timecodestamper->source_clock != NULL) {
    if (timecodestamper->current_tc->hours == 0
        && timecodestamper->current_tc->minutes == 0
        && timecodestamper->current_tc->seconds == 0
        && timecodestamper->current_tc->frames == 0) {
      guint64 hours, minutes, seconds, frames;
      /* Daily jam time */

      ref_time = gst_clock_get_time (timecodestamper->source_clock);
      ref_time = ref_time % (24 * 60 * 60 * GST_SECOND);
      hours = ref_time / (GST_SECOND * 60 * 60);
      ref_time -= hours * GST_SECOND * 60 * 60;
      minutes = ref_time / (GST_SECOND * 60);
      ref_time -= minutes * GST_SECOND * 60;
      seconds = ref_time / GST_SECOND;
      ref_time -= seconds * GST_SECOND;
      /* Converting to frames for the whole ref_time might be inaccurate in case
       * we have a drop frame timecode */
      frames = gst_util_uint64_scale (ref_time, timecodestamper->vinfo.fps_n,
          timecodestamper->vinfo.fps_d * GST_SECOND);

      GST_DEBUG_OBJECT (timecodestamper,
          "Initializing with %" G_GUINT64_FORMAT ":%" G_GUINT64_FORMAT ":%"
          G_GUINT64_FORMAT ":%" G_GUINT64_FORMAT "", hours, minutes, seconds,
          frames);
      gst_video_time_code_init (timecodestamper->current_tc,
          timecodestamper->vinfo.fps_n,
          timecodestamper->vinfo.fps_d,
          NULL,
          timecodestamper->vinfo.interlace_mode ==
          GST_VIDEO_INTERLACE_MODE_PROGRESSIVE ? 0 :
          GST_VIDEO_TIME_CODE_FLAGS_INTERLACED, hours, minutes, seconds, 0, 0);
      gst_timecodestamper_set_drop_frame (timecodestamper);
      /* Do not use frames when initializing because maybe we have drop frame */
      gst_video_time_code_add_frames (timecodestamper->current_tc, frames);
    }
  } else if (timecodestamper->source_clock == NULL) {
    GstClockTime timecode_time;

    timecode_time =
        gst_video_time_code_nsec_since_daily_jam (timecodestamper->current_tc);
    ref_time =
        gst_segment_to_stream_time (&vfilter->segment, GST_FORMAT_TIME,
        buffer->pts);
    if (timecode_time != GST_CLOCK_TIME_NONE && ref_time != GST_CLOCK_TIME_NONE
        && ((timecode_time > ref_time && timecode_time - ref_time > GST_SECOND)
            || (ref_time > timecode_time
                && ref_time - timecode_time > GST_SECOND))) {
      gchar *tc_str =
          gst_video_time_code_to_string (timecodestamper->current_tc);
      GST_WARNING_OBJECT (timecodestamper,
          "Time code %s (stream time %" GST_TIME_FORMAT
          ") has drifted more than one second from stream time %"
          GST_TIME_FORMAT, tc_str, GST_TIME_ARGS (timecode_time),
          GST_TIME_ARGS (ref_time));
      g_free (tc_str);
    }
  }
  gst_buffer_add_video_time_code_meta (buffer, timecodestamper->current_tc);
  gst_video_time_code_increment_frame (timecodestamper->current_tc);
  GST_OBJECT_UNLOCK (timecodestamper);
  return GST_FLOW_OK;
}
Example 18
static gboolean
vorbis_dec_src_query (GstPad * pad, GstQuery * query)
{
  GstVorbisDec *dec;
  gboolean res = FALSE;

  dec = GST_VORBIS_DEC (gst_pad_get_parent (pad));

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      gint64 value;
      GstFormat format;
      gint64 time;

      gst_query_parse_position (query, &format, NULL);

      /* we start from the last seen time */
      time = dec->last_timestamp;
      /* correct for the segment values */
      time = gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, time);

      GST_LOG_OBJECT (dec,
          "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));

      /* and convert to the final format */
      if (!(res =
              vorbis_dec_convert (pad, GST_FORMAT_TIME, time, &format, &value)))
        goto error;

      gst_query_set_position (query, format, value);

      GST_LOG_OBJECT (dec,
          "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
          format);

      break;
    }
    case GST_QUERY_DURATION:
    {
      res = gst_pad_peer_query (dec->sinkpad, query);
      if (!res)
        goto error;

      break;
    }
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      if (!(res =
              vorbis_dec_convert (pad, src_fmt, src_val, &dest_fmt, &dest_val)))
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }
done:
  gst_object_unref (dec);

  return res;

  /* ERRORS */
error:
  {
    GST_WARNING_OBJECT (dec, "error handling query");
    goto done;
  }
}
Example 19
static GstFlowReturn
gst_bml_transform_transform_mono_to_stereo (GstBaseTransform * base,
    GstBuffer * inbuf, GstBuffer * outbuf)
{
  GstMapInfo infoi, infoo;
  GstBMLTransform *bml_transform = GST_BML_TRANSFORM (base);
  GstBMLTransformClass *klass = GST_BML_TRANSFORM_GET_CLASS (bml_transform);
  GstBML *bml = GST_BML (bml_transform);
  GstBMLClass *bml_class = GST_BML_CLASS (klass);
  BMLData *datai, *datao, *seg_datai, *seg_datao;
  gpointer bm = bml->bm;
  guint todo, seg_size, samples_per_buffer;
  gboolean has_data;
  guint mode = 3;               /*WM_READWRITE */

  bml->running_time =
      gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME,
      GST_BUFFER_TIMESTAMP (inbuf));

  if (GST_BUFFER_FLAG_IS_SET (outbuf, GST_BUFFER_FLAG_DISCONT)) {
    bml->subtick_count = (!bml->reverse) ? bml->subticks_per_tick : 1;
  }

  if (bml->subtick_count >= bml->subticks_per_tick) {
    bml (gstbml_reset_triggers (bml, bml_class));
    bml (gstbml_sync_values (bml, bml_class, GST_BUFFER_TIMESTAMP (outbuf)));
    bml (tick (bm));
    bml->subtick_count = 1;
  } else {
    bml->subtick_count++;
  }

  /* don't process data in passthrough-mode */
  if (gst_base_transform_is_passthrough (base)) {
    // we would actually need to convert mono to stereo here
    // but this is not even called
    GST_WARNING_OBJECT (bml_transform, "m2s in passthrough mode");
    //return GST_FLOW_OK;
  }

  if (!gst_buffer_map (inbuf, &infoi, GST_MAP_READ)) {
    GST_WARNING_OBJECT (base, "unable to map input buffer for read");
    return GST_FLOW_ERROR;
  }
  datai = (BMLData *) infoi.data;
  samples_per_buffer = infoi.size / sizeof (BMLData);
  if (!gst_buffer_map (outbuf, &infoo, GST_MAP_READ | GST_MAP_WRITE)) {
    GST_WARNING_OBJECT (base, "unable to map output buffer for read & write");
    /* don't leak the input mapping on the error path */
    gst_buffer_unmap (inbuf, &infoi);
    return GST_FLOW_ERROR;
  }
  datao = (BMLData *) infoo.data;

  // some buzzmachines expect a cleared buffer
  //for(i=0;i<samples_per_buffer*2;i++) datao[i]=0.0f;
  memset (datao, 0, samples_per_buffer * 2 * sizeof (BMLData));

  /* if buffer has only silence process with different mode */
  if (GST_BUFFER_FLAG_IS_SET (outbuf, GST_BUFFER_FLAG_GAP)) {
    mode = 2;                   /* WM_WRITE */
  } else {
    gfloat fc = 32768.0;
    orc_scalarmultiply_f32_ns (datai, datai, fc, samples_per_buffer);
  }

  GST_DEBUG_OBJECT (bml_transform, "  calling work_m2s(%d,%d)",
      samples_per_buffer, mode);
  todo = samples_per_buffer;
  seg_datai = datai;
  seg_datao = datao;
  has_data = FALSE;
  while (todo) {
    // 256 is MachineInterface.h::MAX_BUFFER_LENGTH
    seg_size = (todo > 256) ? 256 : todo;
    has_data |= bml (work_m2s (bm, seg_datai, seg_datao, (int) seg_size, mode));
    seg_datai = &seg_datai[seg_size];
    seg_datao = &seg_datao[seg_size * 2];
    todo -= seg_size;
  }
  if (gstbml_fix_data ((GstElement *) bml_transform, &infoo, has_data)) {
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
  } else {
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);
  }

  gst_buffer_unmap (inbuf, &infoi);
  gst_buffer_unmap (outbuf, &infoo);
  return (GST_FLOW_OK);
}
Example 20
bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                       int64_t aTimeThreshold)
{
    NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

    GstBuffer *buffer = nullptr;

    {
        ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

        if (mReachedVideoEos && !mVideoSinkBufferCount) {
            return false;
        }

        /* Wait something to be decoded before return or continue */
        if (!mVideoSinkBufferCount) {
            if (!mAudioSinkBufferCount) {
                /* We have nothing decoded, so it makes no sense to return to the state
                 * machine: it would call us back immediately, we'd return again and so
                 * on, wasting CPU cycles with no work done. Instead, block here until
                 * there is either video or audio data available.
                 */
                mon.Wait();
                if (!mVideoSinkBufferCount) {
                    /* There is still no video data available, so either there is audio data or
                     * something else has happened (Eos, etc...). Return to the state machine
                     * to process it
                     */
                    return true;
                }
            }
            else {
                return true;
            }
        }

        mDecoder->NotifyDecodedFrames(0, 1);

#if GST_VERSION_MAJOR >= 1
        GstSample *sample = gst_app_sink_pull_sample(mVideoAppSink);
        buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
        gst_sample_unref(sample);
#else
        buffer = gst_app_sink_pull_buffer(mVideoAppSink);
#endif
        mVideoSinkBufferCount--;
    }

    bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DELTA_UNIT);
    if ((aKeyFrameSkip && !isKeyframe)) {
        gst_buffer_unref(buffer);
        return true;
    }

    int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
    {
        ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
        timestamp = gst_segment_to_stream_time(&mVideoSegment,
                                               GST_FORMAT_TIME, timestamp);
    }
    NS_ASSERTION(GST_CLOCK_TIME_IS_VALID(timestamp),
                 "frame has invalid timestamp");

    timestamp = GST_TIME_AS_USECONDS(timestamp);
    int64_t duration = 0;
    if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
        duration = GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
    else if (fpsNum && fpsDen)
        /* add 1-frame duration */
        duration = gst_util_uint64_scale(GST_USECOND, fpsDen, fpsNum);

    if (timestamp < aTimeThreshold) {
        LOG(PR_LOG_DEBUG, "skipping frame %" GST_TIME_FORMAT
            " threshold %" GST_TIME_FORMAT,
            GST_TIME_ARGS(timestamp * 1000),
            GST_TIME_ARGS(aTimeThreshold * 1000));
        gst_buffer_unref(buffer);
        return true;
    }

    if (!buffer)
        /* no more frames */
        return true;

#if GST_VERSION_MAJOR >= 1
    if (mConfigureAlignment && buffer->pool) {
        GstStructure *config = gst_buffer_pool_get_config(buffer->pool);
        GstVideoAlignment align;
        if (gst_buffer_pool_config_get_video_alignment(config, &align))
            gst_video_info_align(&mVideoInfo, &align);
        gst_structure_free(config);
        mConfigureAlignment = false;
    }
#endif

    nsRefPtr<PlanarYCbCrImage> image = GetImageFromBuffer(buffer);
    if (!image) {
        /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fallback to
         * allocating a PlanarYCbCrImage backed GstBuffer here and memcpy.
         */
        GstBuffer* tmp = nullptr;
        CopyIntoImageBuffer(buffer, &tmp, image);
        gst_buffer_unref(buffer);
        buffer = tmp;
    }

    int64_t offset = mDecoder->GetResource()->Tell(); // Estimate location in media.
    VideoData* video = VideoData::CreateFromImage(mInfo.mVideo,
                       mDecoder->GetImageContainer(),
                       offset, timestamp, duration,
                       static_cast<Image*>(image.get()),
                       isKeyframe, -1, mPicture);
    mVideoQueue.Push(video);

    gst_buffer_unref(buffer);

    return true;
}
/* Pipeline Callbacks */
static gboolean
probe_cb (InsanityGstTest * ptest, GstPad * pad, GstMiniObject * object,
    gpointer userdata)
{
  InsanityTest *test = INSANITY_TEST (ptest);

  global_last_probe = g_get_monotonic_time ();

  DECODER_TEST_LOCK ();
  if (GST_IS_BUFFER (object)) {
    GstBuffer *buf;
    GstClockTime ts;

    buf = GST_BUFFER (object);
    ts = GST_BUFFER_PTS (buf);

    /* First check clipping */
    if (glob_testing_parser == FALSE && GST_CLOCK_TIME_IS_VALID (ts) &&
        glob_waiting_segment == FALSE) {
      GstClockTime ts_end, cstart, cstop;

      /* Check if buffer is completely outside the segment */
      ts_end = ts;
      if (GST_BUFFER_DURATION_IS_VALID (buf))
        ts_end += GST_BUFFER_DURATION (buf);

      if (!gst_segment_clip (&glob_last_segment,
              glob_last_segment.format, ts, ts_end, &cstart, &cstop)) {
        char *msg = g_strdup_printf ("Got timestamp %" GST_TIME_FORMAT " -- %"
            GST_TIME_FORMAT ", outside configured segment (%" GST_TIME_FORMAT
            " -- %" GST_TIME_FORMAT "), method %s",
            GST_TIME_ARGS (ts), GST_TIME_ARGS (ts_end),
            GST_TIME_ARGS (glob_last_segment.start),
            GST_TIME_ARGS (glob_last_segment.stop),
            test_get_name (glob_in_progress));
        insanity_test_validate_checklist_item (INSANITY_TEST (ptest),
            "segment-clipping", FALSE, msg);
        g_free (msg);
        glob_bad_segment_clipping = TRUE;
      }
    }

    switch (glob_in_progress) {
      case TEST_NONE:
        if (glob_waiting_first_segment == TRUE)
          insanity_test_validate_checklist_item (test, "first-segment",
              FALSE, "Got a buffer before the first segment");

        /* Got the first buffer, starting testing dance */
        next_test (test);
        break;
      case TEST_POSITION:
        test_position (test, buf);
        break;
      case TEST_FAST_FORWARD:
      case TEST_BACKWARD_PLAYBACK:
      case TEST_FAST_BACKWARD:
      {
        gint64 stime_ts;

        if (GST_CLOCK_TIME_IS_VALID (ts) == FALSE ||
            glob_waiting_segment == TRUE) {
          break;
        }

        stime_ts = gst_segment_to_stream_time (&glob_last_segment,
            glob_last_segment.format, ts);

        if (GST_CLOCK_TIME_IS_VALID (glob_seek_first_buf_ts) == FALSE) {
          GstClockTime expected_ts =
              gst_segment_to_stream_time (&glob_last_segment,
              glob_last_segment.format,
              glob_seek_rate <
              0 ? glob_seek_stop_ts : glob_seek_segment_seektime);

          GstClockTimeDiff diff = ABS (GST_CLOCK_DIFF (stime_ts, expected_ts));

          if (diff > SEEK_THRESHOLD) {
            gchar *valmsg =
                g_strdup_printf ("Received buffer timestamp %" GST_TIME_FORMAT
                " Seeek wanted %" GST_TIME_FORMAT "",
                GST_TIME_ARGS (stime_ts),
                GST_TIME_ARGS (expected_ts));

            validate_current_test (test, FALSE, valmsg);
            next_test (test);

            g_free (valmsg);
          } else
            glob_seek_first_buf_ts = stime_ts;

        } else {
          GstClockTimeDiff diff =
              GST_CLOCK_DIFF (stime_ts, glob_seek_first_buf_ts);

          if (diff < 0)
            diff = -diff;

          if (diff >= glob_playback_duration * GST_SECOND) {
            validate_current_test (test, TRUE, NULL);
            next_test (test);
          }
        }
        break;
      }
      default:
        break;
    }

  } else if (GST_IS_EVENT (object)) {
    GstEvent *event = GST_EVENT (object);
    guint seqnum = gst_event_get_seqnum (event);

    if (G_LIKELY (glob_seqnum_found == FALSE) && seqnum == glob_seqnum)
      glob_seqnum_found = TRUE;

    if (glob_seqnum_found == TRUE && seqnum != glob_seqnum) {
      gchar *message = g_strdup_printf ("Current seqnum %i != "
          "received %i", glob_seqnum, seqnum);

      insanity_test_validate_checklist_item (test, "seqnum-management",
          FALSE, message);

      glob_wrong_seqnum = TRUE;
      g_free (message);
    }

    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_SEGMENT:
      {
        gst_event_copy_segment (event, &glob_last_segment);

        if (glob_waiting_segment == FALSE)
          /* Cache the segment as it will be our reference but don't look
           * further */
          goto done;

        glob_last_segment_start_time = glob_last_segment.start;
        if (glob_waiting_first_segment == TRUE) {
          insanity_test_validate_checklist_item (test, "first-segment", TRUE,
              NULL);

          glob_waiting_first_segment = FALSE;
        } else if (glob_in_progress >= TEST_FAST_FORWARD &&
            glob_in_progress <= TEST_FAST_BACKWARD) {
          GstClockTimeDiff diff;
          gboolean valid_stop = TRUE;
          GstClockTimeDiff wdiff, rdiff;

          rdiff =
              ABS (GST_CLOCK_DIFF (glob_last_segment.stop,
                  glob_last_segment.start)) * ABS (glob_last_segment.rate *
              glob_last_segment.applied_rate);
          wdiff =
              ABS (GST_CLOCK_DIFF (glob_seek_stop_ts,
                  glob_seek_segment_seektime));

          diff =
              GST_CLOCK_DIFF (glob_last_segment.position,
              glob_seek_segment_seektime);
          if (diff < 0)
            diff = -diff;

          /* Now compare with the expected segment */
          if ((glob_last_segment.rate * glob_last_segment.applied_rate) ==
              glob_seek_rate && diff <= SEEK_THRESHOLD && valid_stop) {
            glob_seek_got_segment = TRUE;
          } else {
            GstClockTime stopdiff = ABS (GST_CLOCK_DIFF (rdiff, wdiff));

            gchar *validate_msg =
                g_strdup_printf ("Wrong segment received, Rate %f expected "
                "%f, start time diff %" GST_TIME_FORMAT " stop diff %"
                GST_TIME_FORMAT,
                (glob_last_segment.rate * glob_last_segment.applied_rate),
                glob_seek_rate,
                GST_TIME_ARGS (diff), GST_TIME_ARGS (stopdiff));

            validate_current_test (test, FALSE, validate_msg);
            next_test (test);
            g_free (validate_msg);
          }
        }

        glob_waiting_segment = FALSE;
        break;
      }
      default:
        break;
    }
  }

done:
  DECODER_TEST_UNLOCK ();
  return TRUE;
}
Example 22
static GstFlowReturn
gst_dicetv_transform (GstBaseTransform * trans, GstBuffer * in, GstBuffer * out)
{
  GstDiceTV *filter = GST_DICETV (trans);
  guint32 *src, *dest;
  gint i, map_x, map_y, map_i, base, dx, dy, di;
  gint video_width, g_cube_bits, g_cube_size;
  gint g_map_height, g_map_width;
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime timestamp, stream_time;
  const guint8 *dicemap;

  src = (guint32 *) GST_BUFFER_DATA (in);
  dest = (guint32 *) GST_BUFFER_DATA (out);

  timestamp = GST_BUFFER_TIMESTAMP (in);
  stream_time =
      gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (G_OBJECT (filter), stream_time);

  GST_OBJECT_LOCK (filter);
  video_width = filter->width;
  g_cube_bits = filter->g_cube_bits;
  g_cube_size = filter->g_cube_size;
  g_map_height = filter->g_map_height;
  g_map_width = filter->g_map_width;

  dicemap = filter->dicemap;

  map_i = 0;
  for (map_y = 0; map_y < g_map_height; map_y++) {
    for (map_x = 0; map_x < g_map_width; map_x++) {
      base = (map_y << g_cube_bits) * video_width + (map_x << g_cube_bits);

      switch (dicemap[map_i]) {
        case DICE_UP:
          for (dy = 0; dy < g_cube_size; dy++) {
            i = base + dy * video_width;
            for (dx = 0; dx < g_cube_size; dx++) {
              dest[i] = src[i];
              i++;
            }
          }
          break;
        case DICE_LEFT:
          for (dy = 0; dy < g_cube_size; dy++) {
            i = base + dy * video_width;

            for (dx = 0; dx < g_cube_size; dx++) {
              di = base + (dx * video_width) + (g_cube_size - dy - 1);
              dest[di] = src[i];
              i++;
            }
          }
          break;
        case DICE_DOWN:
          for (dy = 0; dy < g_cube_size; dy++) {
            di = base + dy * video_width;
            i = base + (g_cube_size - dy - 1) * video_width + g_cube_size;
            for (dx = 0; dx < g_cube_size; dx++) {
              i--;
              dest[di] = src[i];
              di++;
            }
          }
          break;
        case DICE_RIGHT:
          for (dy = 0; dy < g_cube_size; dy++) {
            i = base + (dy * video_width);
            for (dx = 0; dx < g_cube_size; dx++) {
              di = base + dy + (g_cube_size - dx - 1) * video_width;
              dest[di] = src[i];
              i++;
            }
          }
          break;
        default:
          g_assert_not_reached ();
          break;
      }
      map_i++;
    }
  }
  GST_OBJECT_UNLOCK (filter);

  return ret;
}
/* Called with lock held */
static gchar *
gst_time_overlay_get_text (GstBaseTextOverlay * overlay,
    GstBuffer * video_frame)
{
  GstTimeOverlayTimeLine time_line;
  gchar *time_str, *txt, *ret;

  overlay->need_render = TRUE;

  time_line = g_atomic_int_get (&GST_TIME_OVERLAY_CAST (overlay)->time_line);
  if (time_line == GST_TIME_OVERLAY_TIME_LINE_TIME_CODE) {
    GstVideoTimeCodeMeta *tc_meta =
        gst_buffer_get_video_time_code_meta (video_frame);
    if (!tc_meta) {
      GST_DEBUG ("buffer without valid timecode");
      return g_strdup ("00:00:00:00");
    }
    time_str = gst_video_time_code_to_string (&tc_meta->tc);
    GST_DEBUG ("buffer with timecode %s", time_str);
  } else {
    GstClockTime ts, ts_buffer;
    GstSegment *segment = &overlay->segment;

    ts_buffer = GST_BUFFER_TIMESTAMP (video_frame);

    if (!GST_CLOCK_TIME_IS_VALID (ts_buffer)) {
      GST_DEBUG ("buffer without valid timestamp");
      return g_strdup ("");
    }

    GST_DEBUG ("buffer with timestamp %" GST_TIME_FORMAT,
        GST_TIME_ARGS (ts_buffer));

    switch (time_line) {
      case GST_TIME_OVERLAY_TIME_LINE_STREAM_TIME:
        ts = gst_segment_to_stream_time (segment, GST_FORMAT_TIME, ts_buffer);
        break;
      case GST_TIME_OVERLAY_TIME_LINE_RUNNING_TIME:
        ts = gst_segment_to_running_time (segment, GST_FORMAT_TIME, ts_buffer);
        break;
      case GST_TIME_OVERLAY_TIME_LINE_BUFFER_TIME:
      default:
        ts = ts_buffer;
        break;
    }

    time_str = gst_time_overlay_render_time (GST_TIME_OVERLAY (overlay), ts);
  }
  txt = g_strdup (overlay->default_text);

  if (txt != NULL && *txt != '\0') {
    ret = g_strdup_printf ("%s %s", txt, time_str);
  } else {
    ret = time_str;
    time_str = NULL;
  }

  g_free (txt);
  g_free (time_str);

  return ret;
}
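The switch above relies on the distinction between stream time (the position
within the stream, also used for controller sync in the other examples) and
running time (the time elapsed in playback, used for synchronization). A
minimal sketch printing both for one buffer (the helper and its parameters
are assumptions for illustration):

static void
print_buffer_times (const GstSegment * segment, GstBuffer * buffer)
{
  GstClockTime pts = GST_BUFFER_PTS (buffer);
  guint64 stream_time = gst_segment_to_stream_time (segment,
      GST_FORMAT_TIME, pts);
  guint64 running_time = gst_segment_to_running_time (segment,
      GST_FORMAT_TIME, pts);

  g_print ("pts %" GST_TIME_FORMAT " stream %" GST_TIME_FORMAT
      " running %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (pts),
      GST_TIME_ARGS (stream_time), GST_TIME_ARGS (running_time));
}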
Example 24
nsresult GStreamerReader::ReadMetadata(MediaInfo* aInfo,
                                       MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  nsresult ret = NS_OK;

  /*
   * Parse MP3 headers before we kick off the GStreamer pipeline otherwise there
   * might be concurrent stream operations happening on both decoding and gstreamer
   * threads which will screw the GStreamer state machine.
   */
  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    ParseMP3Headers();
  }


  /* We do 3 attempts here: decoding audio and video, decoding video only,
   * decoding audio only. This allows us to play streams that have one broken
   * stream but that are otherwise decodeable.
   */
  guint flags[3] = {GST_PLAY_FLAG_VIDEO|GST_PLAY_FLAG_AUDIO,
    static_cast<guint>(~GST_PLAY_FLAG_AUDIO), static_cast<guint>(~GST_PLAY_FLAG_VIDEO)};
  guint default_flags, current_flags;
  g_object_get(mPlayBin, "flags", &default_flags, nullptr);

  GstMessage* message = nullptr;
  for (unsigned int i = 0; i < G_N_ELEMENTS(flags); i++) {
    current_flags = default_flags & flags[i];
    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, nullptr);

    /* reset filter caps to ANY */
    GstCaps* caps = gst_caps_new_any();
    GstElement* filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
    g_object_set(filter, "caps", caps, nullptr);
    gst_object_unref(filter);

    filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");
    g_object_set(filter, "caps", caps, nullptr);
    gst_object_unref(filter);
    gst_caps_unref(caps);
    filter = nullptr;

    if (!(current_flags & GST_PLAY_FLAG_AUDIO))
      filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
    else if (!(current_flags & GST_PLAY_FLAG_VIDEO))
      filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");

    if (filter) {
      /* Little trick: set the target caps to "skip" so that playbin2 fails to
       * find a decoder for the stream we want to skip.
       */
      GstCaps* filterCaps = gst_caps_new_simple ("skip", nullptr, nullptr);
      g_object_set(filter, "caps", filterCaps, nullptr);
      gst_caps_unref(filterCaps);
      gst_object_unref(filter);
    }

    LOG(PR_LOG_DEBUG, "starting metadata pipeline");
    if (gst_element_set_state(mPlayBin, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
      LOG(PR_LOG_DEBUG, "metadata pipeline state change failed");
      ret = NS_ERROR_FAILURE;
      continue;
    }

    /* Wait for ASYNC_DONE, which is emitted when the pipeline is built,
     * prerolled and ready to play. Also watch for errors.
     */
    message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
                 (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ASYNC_DONE) {
      LOG(PR_LOG_DEBUG, "read metadata pipeline prerolled");
      gst_message_unref(message);
      ret = NS_OK;
      break;
    } else {
      LOG(PR_LOG_DEBUG, "read metadata pipeline failed to preroll: %s",
            gst_message_type_get_name (GST_MESSAGE_TYPE (message)));

      if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ERROR) {
        GError* error;
        gchar* debug;
        gst_message_parse_error(message, &error, &debug);
        LOG(PR_LOG_ERROR, "read metadata error: %s: %s", error->message, debug);
        g_error_free(error);
        g_free(debug);
      }
      /* Unexpected stream close/EOS or other error. We'll give up if all
       * streams are in error/EOS. */
      gst_element_set_state(mPlayBin, GST_STATE_NULL);
      gst_message_unref(message);
      ret = NS_ERROR_FAILURE;
    }
  }

  if (NS_SUCCEEDED(ret))
    ret = CheckSupportedFormats();

  if (NS_FAILED(ret))
    /* we couldn't get this to play */
    return ret;

  /* report the duration */
  gint64 duration;

  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // The MP3FrameParser has reported a duration; use that over the gstreamer
    // reported duration for inter-platform consistency.
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mUseParserDuration = true;
    mLastParserDuration = mMP3FrameParser.GetDuration();
    mDecoder->SetMediaDuration(mLastParserDuration);
  } else {
    LOG(PR_LOG_DEBUG, "querying duration");
    // Otherwise use the gstreamer duration.
#if GST_VERSION_MAJOR >= 1
    if (gst_element_query_duration(GST_ELEMENT(mPlayBin),
          GST_FORMAT_TIME, &duration)) {
#else
    GstFormat format = GST_FORMAT_TIME;
    if (gst_element_query_duration(GST_ELEMENT(mPlayBin),
      &format, &duration) && format == GST_FORMAT_TIME) {
#endif
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      LOG(PR_LOG_DEBUG, "have duration %" GST_TIME_FORMAT, GST_TIME_ARGS(duration));
      duration = GST_TIME_AS_USECONDS (duration);
      mDecoder->SetMediaDuration(duration);
    }
  }

  int n_video = 0, n_audio = 0;
  g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, nullptr);
  mInfo.mVideo.mHasVideo = n_video != 0;
  mInfo.mAudio.mHasAudio = n_audio != 0;

  *aInfo = mInfo;

  *aTags = nullptr;

  // Watch the pipeline for fatal errors
#if GST_VERSION_MAJOR >= 1
  gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this, nullptr);
#else
  gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this);
#endif

  /* set the pipeline to PLAYING so that it starts decoding and queueing data in
   * the appsinks */
  gst_element_set_state(mPlayBin, GST_STATE_PLAYING);

  return NS_OK;
}

bool
GStreamerReader::IsMediaSeekable()
{
  if (mUseParserDuration) {
    return true;
  }

  gint64 duration;
#if GST_VERSION_MAJOR >= 1
  if (gst_element_query_duration(GST_ELEMENT(mPlayBin), GST_FORMAT_TIME,
                                 &duration)) {
#else
  GstFormat format = GST_FORMAT_TIME;
  if (gst_element_query_duration(GST_ELEMENT(mPlayBin), &format, &duration) &&
      format == GST_FORMAT_TIME) {
#endif
    return true;
  }

  return false;
}

nsresult GStreamerReader::CheckSupportedFormats()
{
  bool done = false;
  bool unsupported = false;

  GstIterator* it = gst_bin_iterate_recurse(GST_BIN(mPlayBin));
  while (!done) {
    GstIteratorResult res;
    GstElement* element;

#if GST_VERSION_MAJOR >= 1
    GValue value = {0,};
    res = gst_iterator_next(it, &value);
#else
    res = gst_iterator_next(it, (void **) &element);
#endif
    switch (res) {
      case GST_ITERATOR_OK:
      {
#if GST_VERSION_MAJOR >= 1
        element = GST_ELEMENT (g_value_get_object (&value));
#endif
        GstElementFactory* factory = gst_element_get_factory(element);
        if (factory) {
          const char* klass = gst_element_factory_get_klass(factory);
          GstPad* pad = gst_element_get_static_pad(element, "sink");
          if (pad) {
            GstCaps* caps;

#if GST_VERSION_MAJOR >= 1
            caps = gst_pad_get_current_caps(pad);
#else
            caps = gst_pad_get_negotiated_caps(pad);
#endif

            if (caps) {
              /* check for demuxers but ignore elements like id3demux */
              if (strstr (klass, "Demuxer") && !strstr(klass, "Metadata"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleContainerCaps(caps);
              else if (strstr (klass, "Decoder") && !strstr(klass, "Generic"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleCodecCaps(caps);

              gst_caps_unref(caps);
            }
            gst_object_unref(pad);
          }
        }

#if GST_VERSION_MAJOR >= 1
        g_value_unset (&value);
#else
        gst_object_unref(element);
#endif
        done = unsupported;
        break;
      }
      case GST_ITERATOR_RESYNC:
        gst_iterator_resync(it);
        unsupported = false;
        done = false;
        break;
      case GST_ITERATOR_ERROR:
        done = true;
        break;
      case GST_ITERATOR_DONE:
        done = true;
        break;
    }
  }

  gst_iterator_free(it);

  return unsupported ? NS_ERROR_FAILURE : NS_OK;
}

nsresult GStreamerReader::ResetDecode()
{
  nsresult res = NS_OK;

  LOG(PR_LOG_DEBUG, "reset decode");

  if (NS_FAILED(MediaDecoderReader::ResetDecode())) {
    res = NS_ERROR_FAILURE;
  }

  mVideoQueue.Reset();
  mAudioQueue.Reset();

  mVideoSinkBufferCount = 0;
  mAudioSinkBufferCount = 0;
  mReachedAudioEos = false;
  mReachedVideoEos = false;
#if GST_VERSION_MAJOR >= 1
  mConfigureAlignment = true;
#endif

  LOG(PR_LOG_DEBUG, "reset decode done");

  return res;
}

bool GStreamerReader::DecodeAudioData()
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedAudioEos && !mAudioSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mAudioSinkBufferCount) {
      if (!mVideoSinkBufferCount) {
        /* We have nothing decoded, so returning to the state machine makes no
         * sense: it would call us back immediately, we would return again, and
         * so on, wasting CPU cycles for nothing. Block here until either video
         * or audio data becomes available.
         */
        mon.Wait();
        if (!mAudioSinkBufferCount) {
          /* There is still no audio data available, so either there is video
           * data or something else has happened (EOS, etc.). Return to the
           * state machine to process it.
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

#if GST_VERSION_MAJOR >= 1
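    /* in 1.x the appsink hands out a GstSample; keep our own ref on the
     * wrapped buffer so it outlives the sample */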
    GstSample *sample = gst_app_sink_pull_sample(mAudioAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mAudioAppSink);
#endif

    mAudioSinkBufferCount--;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    timestamp = gst_segment_to_stream_time(&mAudioSegment,
                                           GST_FORMAT_TIME, timestamp);
  }
  timestamp = GST_TIME_AS_USECONDS(timestamp);

  int64_t offset = GST_BUFFER_OFFSET(buffer);
  guint8* data;
#if GST_VERSION_MAJOR >= 1
  GstMapInfo info;
  gst_buffer_map(buffer, &info, GST_MAP_READ);
  unsigned int size = info.size;
  data = info.data;
#else
  unsigned int size = GST_BUFFER_SIZE(buffer);
  data = GST_BUFFER_DATA(buffer);
#endif
  int32_t frames = (size / sizeof(AudioDataValue)) / mInfo.mAudio.mChannels;

  typedef AudioCompactor::NativeCopy GstCopy;
  mAudioCompactor.Push(offset,
                       timestamp,
                       mInfo.mAudio.mRate,
                       frames,
                       mInfo.mAudio.mChannels,
                       GstCopy(data,
                               size,
                               mInfo.mAudio.mChannels));
#if GST_VERSION_MAJOR >= 1
  gst_buffer_unmap(buffer, &info);
#endif

  gst_buffer_unref(buffer);

  return true;
}

bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                       int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedVideoEos && !mVideoSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mVideoSinkBufferCount) {
      if (!mAudioSinkBufferCount) {
        /* We have nothing decoded, so returning to the state machine makes no
         * sense: it would call us back immediately, we would return again, and
         * so on, wasting CPU cycles for nothing. Block here until either video
         * or audio data becomes available.
         */
        mon.Wait();
        if (!mVideoSinkBufferCount) {
          /* There is still no video data available, so either there is audio
           * data or something else has happened (EOS, etc.). Return to the
           * state machine to process it.
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

    mDecoder->NotifyDecodedFrames(0, 1);

#if GST_VERSION_MAJOR >= 1
    GstSample *sample = gst_app_sink_pull_sample(mVideoAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mVideoAppSink);
#endif
    mVideoSinkBufferCount--;
  }

  if (!buffer)
    /* no more frames */
    return true;

  bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  if (aKeyFrameSkip && !isKeyframe) {
    gst_buffer_unref(buffer);
    return true;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    timestamp = gst_segment_to_stream_time(&mVideoSegment,
                                           GST_FORMAT_TIME, timestamp);
  }
  NS_ASSERTION(GST_CLOCK_TIME_IS_VALID(timestamp),
               "frame has invalid timestamp");

  timestamp = GST_TIME_AS_USECONDS(timestamp);
  int64_t duration = 0;
  if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
    duration = GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
  else if (fpsNum && fpsDen)
    /* add 1-frame duration */
    duration = gst_util_uint64_scale(GST_USECOND, fpsDen, fpsNum);

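  /* timestamp and aTimeThreshold are in microseconds here, hence the
   * scaling by 1000 for the nanosecond-based GST_TIME_FORMAT below */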
  if (timestamp < aTimeThreshold) {
    LOG(PR_LOG_DEBUG, "skipping frame %" GST_TIME_FORMAT
                      " threshold %" GST_TIME_FORMAT,
                      GST_TIME_ARGS(timestamp * 1000),
                      GST_TIME_ARGS(aTimeThreshold * 1000));
    gst_buffer_unref(buffer);
    return true;
  }

#if GST_VERSION_MAJOR >= 1
  if (mConfigureAlignment && buffer->pool) {
    GstStructure *config = gst_buffer_pool_get_config(buffer->pool);
    GstVideoAlignment align;
    if (gst_buffer_pool_config_get_video_alignment(config, &align))
      gst_video_info_align(&mVideoInfo, &align);
    gst_structure_free(config);
    mConfigureAlignment = false;
  }
#endif

  nsRefPtr<PlanarYCbCrImage> image = GetImageFromBuffer(buffer);
  if (!image) {
    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fall back to
     * allocating a PlanarYCbCrImage-backed GstBuffer here and memcpy.
     */
    GstBuffer* tmp = nullptr;
    CopyIntoImageBuffer(buffer, &tmp, image);
    gst_buffer_unref(buffer);
    buffer = tmp;
  }

  int64_t offset = mDecoder->GetResource()->Tell(); // Estimate location in media.
  VideoData* video = VideoData::CreateFromImage(mInfo.mVideo,
                                                mDecoder->GetImageContainer(),
                                                offset, timestamp, duration,
                                                static_cast<Image*>(image.get()),
                                                isKeyframe, -1, mPicture);
  mVideoQueue.Push(video);

  gst_buffer_unref(buffer);

  return true;
}
Esempio n. 25
0
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;
  gboolean force_keyframe;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
  if ((gint64) running_time < 0) {
    GST_DEBUG_OBJECT (enc, "Dropping buffer, timestamp: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
    gst_buffer_unref (buffer);
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK (enc);
  if (enc->bitrate_changed) {
    long int bitrate = enc->video_bitrate;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
        sizeof (long int));
    enc->bitrate_changed = FALSE;
  }

  if (enc->quality_changed) {
    long int quality = enc->video_quality;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
        sizeof (long int));
    enc->quality_changed = FALSE;
  }

  /* see if we need to schedule a keyframe */
  force_keyframe = enc->force_keyframe;
  enc->force_keyframe = FALSE;
  GST_OBJECT_UNLOCK (enc);

  if (force_keyframe) {
    GstClockTime stream_time;
    GstStructure *s;

    stream_time = gst_segment_to_stream_time (&enc->segment,
        GST_FORMAT_TIME, timestamp);

    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    theora_enc_force_keyframe (enc);

    gst_pad_push_event (enc->srcpad,
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf;
    GSList *buffers = NULL;
    int result;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* create the remaining theora headers */
    th_comment_clear (&enc->comment);
    th_comment_init (&enc->comment);

    while ((result =
            th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
      ret =
          theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
          GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf);
      if (ret != GST_FLOW_OK) {
        goto header_buffer_alloc;
      }
      buffers = g_slist_prepend (buffers, buf);
    }
    if (result < 0) {
      g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
      g_slist_free (buffers);
      goto encoder_disabled;
    }

    buffers = g_slist_reverse (buffers);

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buffers);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    g_slist_foreach (buffers, (GFunc) gst_buffer_set_caps, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    while (buffers) {
      buf = buffers->data;
      buffers = g_slist_delete_link (buffers, buffers);
      if ((ret = theora_push_buffer (enc, buf)) != GST_FLOW_OK) {
        g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
        g_slist_free (buffers);
        goto header_push;
      }
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    th_ycbcr_buffer ycbcr;
    gint res;

    theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
      if (!theora_enc_read_multipass_cache (enc)) {
        ret = GST_FLOW_ERROR;
        goto multipass_read_failed;
      }
    }

    res = th_encode_ycbcr_in (enc->encoder, ycbcr);
    /* none of the failure cases can happen here */
    g_assert (res == 0);

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
      if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
        ret = GST_FLOW_ERROR;
        goto multipass_write_failed;
      }
    }

    ret = GST_FLOW_OK;
    while (th_encode_packetout (enc->encoder, 0, &op)) {
      GstClockTime next_time;

      next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
multipass_read_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
multipass_write_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
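
The GstForceKeyUnit handling above can also be triggered from application
code; a minimal sketch, assuming the pre-1.0 custom-event convention and a
placeholder 'encoder' element (the names here are illustrative):

  /* ask the encoder for a keyframe via the conventional custom upstream
   * GstForceKeyUnit event; gst_event_new_custom() takes ownership of s */
  GstStructure *s = gst_structure_new ("GstForceKeyUnit",
      "all-headers", G_TYPE_BOOLEAN, TRUE, NULL);
  gst_element_send_event (encoder,
      gst_event_new_custom (GST_EVENT_CUSTOM_UPSTREAM, s));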
Example 26
static gboolean
gst_wildmidi_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  gboolean res = TRUE;
  GstWildmidi *wildmidi = GST_WILDMIDI (parent);
  GstFormat src_format, dst_format;
  gint64 src_value, dst_value;

  if (!wildmidi->song)
    return FALSE;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_DURATION:
      gst_query_set_duration (query, GST_FORMAT_TIME,
          gst_util_uint64_scale_int (wildmidi->o_len, GST_SECOND,
              WILDMIDI_RATE));
      break;
    case GST_QUERY_POSITION:
      gst_query_set_position (query, GST_FORMAT_TIME,
          gst_util_uint64_scale_int (wildmidi->o_segment->position, GST_SECOND,
              WILDMIDI_RATE));
      break;
    case GST_QUERY_CONVERT:
      gst_query_parse_convert (query, &src_format, &src_value,
          &dst_format, NULL);

      res =
          gst_wildmidi_src_convert (wildmidi, src_format, src_value,
          &dst_format, &dst_value);
      if (res)
        gst_query_set_convert (query, src_format, src_value, dst_format,
            dst_value);

      break;
    case GST_QUERY_FORMATS:
      gst_query_set_formats (query, 3,
          GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
      break;
    case GST_QUERY_SEGMENT:{
      GstFormat format;
      gint64 start, stop;

      format = wildmidi->o_segment->format;

      start =
          gst_segment_to_stream_time (wildmidi->o_segment, format,
          wildmidi->o_segment->start);
      if ((stop = wildmidi->o_segment->stop) == -1)
        stop = wildmidi->o_segment->duration;
      else
        stop = gst_segment_to_stream_time (wildmidi->o_segment, format, stop);

      gst_query_set_segment (query, wildmidi->o_segment->rate, format, start,
          stop);
      res = TRUE;
      break;
    }
    case GST_QUERY_SEEKING:
      gst_query_set_seeking (query, wildmidi->o_segment->format,
          TRUE, 0, wildmidi->o_len);
      break;
    default:
      res = gst_pad_query_default (pad, parent, query);
      break;
  }

  return res;
}
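
The query handler above answers position and duration queries among others; a
caller-side sketch that would exercise it (GStreamer 1.x signatures, with a
placeholder 'pipeline' element):

  gint64 pos = GST_CLOCK_TIME_NONE, dur = GST_CLOCK_TIME_NONE;
  if (gst_element_query_position (pipeline, GST_FORMAT_TIME, &pos) &&
      gst_element_query_duration (pipeline, GST_FORMAT_TIME, &dur)) {
    g_print ("position %" GST_TIME_FORMAT " of %" GST_TIME_FORMAT "\n",
        GST_TIME_ARGS (pos), GST_TIME_ARGS (dur));
  }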
Example 27
static GstFlowReturn
gst_bml_transform_transform_ip_stereo (GstBaseTransform * base,
    GstBuffer * outbuf)
{
  GstMapInfo info;
  GstBMLTransform *bml_transform = GST_BML_TRANSFORM (base);
  GstBMLTransformClass *klass = GST_BML_TRANSFORM_GET_CLASS (bml_transform);
  GstBML *bml = GST_BML (bml_transform);
  GstBMLClass *bml_class = GST_BML_CLASS (klass);
  BMLData *data, *seg_data;
  gpointer bm = bml->bm;
  guint todo, seg_size, samples_per_buffer;
  gboolean has_data;
  guint mode = 3;               /*WM_READWRITE */

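  /* note: despite the field name, this stores the buffer's stream time as
   * returned by gst_segment_to_stream_time() */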
  bml->running_time =
      gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME,
      GST_BUFFER_TIMESTAMP (outbuf));

  if (GST_BUFFER_FLAG_IS_SET (outbuf, GST_BUFFER_FLAG_DISCONT)) {
    bml->subtick_count = (!bml->reverse) ? bml->subticks_per_tick : 1;
  }

  /* TODO(ensonic): sync on subticks ? */
  if (bml->subtick_count >= bml->subticks_per_tick) {
    bml (gstbml_reset_triggers (bml, bml_class));
    bml (gstbml_sync_values (bml, bml_class, GST_BUFFER_TIMESTAMP (outbuf)));
    bml (tick (bm));
    bml->subtick_count = 1;
  } else {
    bml->subtick_count++;
  }

  /* don't process data in passthrough-mode */
  if (gst_base_transform_is_passthrough (base))
    return GST_FLOW_OK;

  if (!gst_buffer_map (outbuf, &info, GST_MAP_READ | GST_MAP_WRITE)) {
    GST_WARNING_OBJECT (base, "unable to map buffer for read & write");
    return GST_FLOW_ERROR;
  }
  data = (BMLData *) info.data;
  samples_per_buffer = info.size / (sizeof (BMLData) * 2);

  /* if buffer has only silence process with different mode */
  if (GST_BUFFER_FLAG_IS_SET (outbuf, GST_BUFFER_FLAG_GAP)) {
    mode = 2;                   /* WM_WRITE */
  } else {
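    /* scale normalized floats up to the 16-bit sample range that the Buzz
     * machine code works in */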
    gfloat fc = 32768.0;
    orc_scalarmultiply_f32_ns (data, data, fc, samples_per_buffer * 2);
  }

  GST_DEBUG_OBJECT (bml_transform, "  calling work_m2s(%d,%d)",
      samples_per_buffer, mode);
  todo = samples_per_buffer;
  seg_data = data;
  has_data = FALSE;
  while (todo) {
    // 256 is MachineInterface.h::MAX_BUFFER_LENGTH
    seg_size = (todo > 256) ? 256 : todo;
    // the first seg_data argument can be NULL; it's ignored
    has_data |= bml (work_m2s (bm, seg_data, seg_data, (int) seg_size, mode));
    seg_data = &seg_data[seg_size * 2];
    todo -= seg_size;
  }
  if (gstbml_fix_data ((GstElement *) bml_transform, &info, has_data)) {
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
  } else {
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);
  }

  gst_buffer_unmap (outbuf, &info);

  return (GST_FLOW_OK);
}
static GstMessage *
gst_spectrum_message_new (GstSpectrum * spectrum, GstClockTime timestamp,
    GstClockTime duration)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM_CAST (spectrum);
  GstSpectrumChannel *cd;
  GstStructure *s;
  GValue *mcv = NULL, *pcv = NULL;
  GstClockTime endtime, running_time, stream_time;

  GST_DEBUG_OBJECT (spectrum, "preparing message, bands =%d ", spectrum->bands);

  running_time = gst_segment_to_running_time (&trans->segment, GST_FORMAT_TIME,
      timestamp);
  stream_time = gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME,
      timestamp);
  /* endtime is for backwards compatibility */
  endtime = stream_time + duration;

  s = gst_structure_new ("spectrum",
      "endtime", GST_TYPE_CLOCK_TIME, endtime,
      "timestamp", G_TYPE_UINT64, timestamp,
      "stream-time", G_TYPE_UINT64, stream_time,
      "running-time", G_TYPE_UINT64, running_time,
      "duration", G_TYPE_UINT64, duration, NULL);

  if (!spectrum->multi_channel) {
    cd = &spectrum->channel_data[0];

    if (spectrum->message_magnitude) {
      /* FIXME 0.11: this should be an array, not a list */
      mcv = gst_spectrum_message_add_container (s, GST_TYPE_LIST, "magnitude");
      gst_spectrum_message_add_list (mcv, cd->spect_magnitude, spectrum->bands);
    }
    if (spectrum->message_phase) {
      /* FIXME 0.11: this should be an array, not a list */
      pcv = gst_spectrum_message_add_container (s, GST_TYPE_LIST, "phase");
      gst_spectrum_message_add_list (pcv, cd->spect_phase, spectrum->bands);
    }
  } else {
    guint c;
    guint channels = GST_AUDIO_FILTER (spectrum)->format.channels;

    if (spectrum->message_magnitude) {
      mcv = gst_spectrum_message_add_container (s, GST_TYPE_ARRAY, "magnitude");
    }
    if (spectrum->message_phase) {
      pcv = gst_spectrum_message_add_container (s, GST_TYPE_ARRAY, "phase");
    }

    for (c = 0; c < channels; c++) {
      cd = &spectrum->channel_data[c];

      if (spectrum->message_magnitude) {
        gst_spectrum_message_add_array (mcv, cd->spect_magnitude,
            spectrum->bands);
      }
      if (spectrum->message_phase) {
        gst_spectrum_message_add_array (pcv, cd->spect_phase,
            spectrum->bands);
      }
    }
  }
  return gst_message_new_element (GST_OBJECT (spectrum), s);
}
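
On the receiving side an application unpacks this element message from the
bus; a minimal sketch for the single-channel magnitude case (bus wiring
omitted; the function name is illustrative):

static void
on_spectrum_message (GstMessage * message)
{
  const GstStructure *s = gst_message_get_structure (message);

  if (gst_structure_has_name (s, "spectrum")) {
    const GValue *magnitudes = gst_structure_get_value (s, "magnitude");
    guint i, n = gst_value_list_get_size (magnitudes);

    /* magnitudes are stored as a GValue list of floats, in dB */
    for (i = 0; i < n; i++) {
      const GValue *mag = gst_value_list_get_value (magnitudes, i);
      g_print ("band %u: %f dB\n", i, g_value_get_float (mag));
    }
  }
}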
static GstFlowReturn
gst_audio_fx_base_fir_filter_transform (GstBaseTransform * base,
    GstBuffer * inbuf, GstBuffer * outbuf)
{
  GstAudioFXBaseFIRFilter *self = GST_AUDIO_FX_BASE_FIR_FILTER (base);
  GstClockTime timestamp, expected_timestamp;
  gint channels = GST_AUDIO_FILTER_CHANNELS (self);
  gint rate = GST_AUDIO_FILTER_RATE (self);
  gint bps = GST_AUDIO_FILTER_BPS (self);
  GstMapInfo inmap, outmap;
  guint input_samples;
  guint output_samples;
  guint generated_samples;
  guint64 output_offset;
  gint64 diff = 0;
  GstClockTime stream_time;

  timestamp = GST_BUFFER_TIMESTAMP (outbuf);

  if (!GST_CLOCK_TIME_IS_VALID (timestamp)
      && !GST_CLOCK_TIME_IS_VALID (self->start_ts)) {
    GST_ERROR_OBJECT (self, "Invalid timestamp");
    return GST_FLOW_ERROR;
  }

  g_mutex_lock (&self->lock);
  stream_time =
      gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (self, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (self), stream_time);

  g_return_val_if_fail (self->kernel != NULL, GST_FLOW_ERROR);
  g_return_val_if_fail (channels != 0, GST_FLOW_ERROR);

  if (GST_CLOCK_TIME_IS_VALID (self->start_ts))
    expected_timestamp =
        self->start_ts + gst_util_uint64_scale_int (self->nsamples_in,
        GST_SECOND, rate);
  else
    expected_timestamp = GST_CLOCK_TIME_NONE;

  /* Reset the residue if already existing on discont buffers */
  if (GST_BUFFER_IS_DISCONT (inbuf)
      || (GST_CLOCK_TIME_IS_VALID (expected_timestamp)
          && ABS (GST_CLOCK_DIFF (timestamp,
                  expected_timestamp)) > 5 * GST_MSECOND)) {
    GST_DEBUG_OBJECT (self, "Discontinuity detected - flushing");
    if (GST_CLOCK_TIME_IS_VALID (expected_timestamp))
      gst_audio_fx_base_fir_filter_push_residue (self);
    self->buffer_fill = 0;
    g_free (self->buffer);
    self->buffer = NULL;
    self->start_ts = timestamp;
    self->start_off = GST_BUFFER_OFFSET (inbuf);
    self->nsamples_out = 0;
    self->nsamples_in = 0;
  } else if (!GST_CLOCK_TIME_IS_VALID (self->start_ts)) {
    self->start_ts = timestamp;
    self->start_off = GST_BUFFER_OFFSET (inbuf);
  }

  gst_buffer_map (inbuf, &inmap, GST_MAP_READ);
  gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);

  input_samples = (inmap.size / bps) / channels;
  output_samples = (outmap.size / bps) / channels;

  self->nsamples_in += input_samples;

  generated_samples =
      self->process (self, inmap.data, outmap.data, input_samples);

  gst_buffer_unmap (inbuf, &inmap);
  gst_buffer_unmap (outbuf, &outmap);

  g_assert (generated_samples <= output_samples);
  self->nsamples_out += generated_samples;
  if (generated_samples == 0)
    goto no_samples;

  /* Calculate the number of samples we can push out now without outputting
   * latency zeros in the beginning */
  diff = ((gint64) self->nsamples_out) - ((gint64) self->latency);
  if (diff < 0)
    goto no_samples;

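  /* after the next block, 'diff' holds the number of leading latency samples
   * to trim from the buffer and 'generated_samples' the number of samples
   * actually pushed out */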
  if (diff < generated_samples) {
    gint64 tmp = diff;
    diff = generated_samples - diff;
    generated_samples = tmp;
  } else {
    diff = 0;
  }

  gst_buffer_resize (outbuf, diff * bps * channels,
      generated_samples * bps * channels);

  output_offset = self->nsamples_out - self->latency - generated_samples;
  GST_BUFFER_TIMESTAMP (outbuf) =
      self->start_ts + gst_util_uint64_scale_int (output_offset, GST_SECOND,
      rate);
  GST_BUFFER_DURATION (outbuf) =
      gst_util_uint64_scale_int (output_samples, GST_SECOND, rate);
  if (self->start_off != GST_BUFFER_OFFSET_NONE) {
    GST_BUFFER_OFFSET (outbuf) = self->start_off + output_offset;
    GST_BUFFER_OFFSET_END (outbuf) =
        GST_BUFFER_OFFSET (outbuf) + generated_samples;
  } else {
    GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET_NONE;
    GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_NONE;
  }
  g_mutex_unlock (&self->lock);

  GST_DEBUG_OBJECT (self,
      "Pushing buffer of size %" G_GSIZE_FORMAT " with timestamp: %"
      GST_TIME_FORMAT ", duration: %" GST_TIME_FORMAT ", offset: %"
      G_GUINT64_FORMAT ", offset_end: %" G_GUINT64_FORMAT ", nsamples_out: %d",
      gst_buffer_get_size (outbuf),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)), GST_BUFFER_OFFSET (outbuf),
      GST_BUFFER_OFFSET_END (outbuf), generated_samples);

  return GST_FLOW_OK;

no_samples:
  {
    g_mutex_unlock (&self->lock);
    return GST_BASE_TRANSFORM_FLOW_DROPPED;
  }
}
Example 30
static GstFlowReturn
gst_frei0r_mixer_collected (GstCollectPads * pads, GstFrei0rMixer * self)
{
  GstBuffer *inbuf0 = NULL, *inbuf1 = NULL, *inbuf2 = NULL;
  GstBuffer *outbuf = NULL;
  GstFlowReturn ret = GST_FLOW_OK;
  GSList *l;
  GstFrei0rMixerClass *klass = GST_FREI0R_MIXER_GET_CLASS (self);
  GstClockTime timestamp;
  gdouble time;
  GstSegment *segment = NULL;
  GstAllocationParams alloc_params = { 0, 31, 0, 0 };
  GstMapInfo outmap, inmap0, inmap1, inmap2;

  if (G_UNLIKELY (self->info.width <= 0 || self->info.height <= 0))
    return GST_FLOW_NOT_NEGOTIATED;

  if (G_UNLIKELY (!self->f0r_instance)) {
    self->f0r_instance = gst_frei0r_instance_construct (klass->ftable,
        klass->properties, klass->n_properties, self->property_cache,
        self->info.width, self->info.height);
    if (G_UNLIKELY (!self->f0r_instance))
      return GST_FLOW_ERROR;
  }

  if (self->segment_event) {
    gst_pad_push_event (self->src, self->segment_event);
    self->segment_event = NULL;
  }

  /* FIXME Request an allocator and/or pool */
  outbuf = gst_buffer_new_allocate (NULL, self->info.size, &alloc_params);

  for (l = pads->data; l; l = l->next) {
    GstCollectData *cdata = l->data;

    if (cdata->pad == self->sink0) {
      inbuf0 = gst_collect_pads_pop (pads, cdata);
      segment = &cdata->segment;
    } else if (cdata->pad == self->sink1) {
      inbuf1 = gst_collect_pads_pop (pads, cdata);
    } else if (cdata->pad == self->sink2) {
      inbuf2 = gst_collect_pads_pop (pads, cdata);
    }
  }

  if (!inbuf0 || !inbuf1 || (!inbuf2 && self->sink2))
    goto eos;

  gst_buffer_map (outbuf, &outmap, GST_MAP_READWRITE);
  gst_buffer_map (inbuf0, &inmap0, GST_MAP_READ);
  gst_buffer_map (inbuf1, &inmap1, GST_MAP_READ);
  if (inbuf2)
    gst_buffer_map (inbuf2, &inmap2, GST_MAP_READ);

  g_assert (segment != NULL);
  timestamp = GST_BUFFER_PTS (inbuf0);
  timestamp = gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (self, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (timestamp))
    gst_object_sync_values (GST_OBJECT (self), timestamp);

  gst_buffer_copy_into (outbuf, inbuf0,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
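  /* frei0r update functions take the frame time in seconds as a double */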
  time = ((gdouble) GST_BUFFER_PTS (outbuf)) / GST_SECOND;

  GST_OBJECT_LOCK (self);
  klass->ftable->update2 (self->f0r_instance, time,
      (const guint32 *) inmap0.data, (const guint32 *) inmap1.data,
      (inbuf2) ? (const guint32 *) inmap2.data : NULL, (guint32 *) outmap.data);
  GST_OBJECT_UNLOCK (self);

  gst_buffer_unmap (outbuf, &outmap);
  /* unmap the input buffers before dropping our refs; unmapping after
   * unref could touch freed memory */
  gst_buffer_unmap (inbuf0, &inmap0);
  gst_buffer_unref (inbuf0);
  gst_buffer_unmap (inbuf1, &inmap1);
  gst_buffer_unref (inbuf1);
  if (inbuf2) {
    gst_buffer_unmap (inbuf2, &inmap2);
    gst_buffer_unref (inbuf2);
  }

  ret = gst_pad_push (self->src, outbuf);

  return ret;

eos:
  {
    GST_DEBUG_OBJECT (self, "no data available, must be EOS");
    gst_buffer_unref (outbuf);

    if (inbuf0)
      gst_buffer_unref (inbuf0);
    if (inbuf1)
      gst_buffer_unref (inbuf1);
    if (inbuf2)
      gst_buffer_unref (inbuf2);

    gst_pad_push_event (self->src, gst_event_new_eos ());
    return GST_FLOW_EOS;
  }
}