static gboolean
gst_timecodestamper_sink_event (GstBaseTransform * trans, GstEvent * event)
{
  gboolean ret = FALSE;
  GstTimeCodeStamper *timecodestamper = GST_TIME_CODE_STAMPER (trans);

  GST_DEBUG_OBJECT (trans, "received event %" GST_PTR_FORMAT, event);
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;
      guint64 frames;
      gchar *tc_str;
      gboolean notify = FALSE;

      GST_OBJECT_LOCK (timecodestamper);

      gst_event_copy_segment (event, &segment);
      if (segment.format != GST_FORMAT_TIME) {
        GST_OBJECT_UNLOCK (timecodestamper);
        GST_ERROR_OBJECT (timecodestamper, "Invalid segment format");
        return FALSE;
      }
      if (GST_VIDEO_INFO_FORMAT (&timecodestamper->vinfo) ==
          GST_VIDEO_FORMAT_UNKNOWN) {
        GST_ERROR_OBJECT (timecodestamper,
            "Received segment event without caps");
        GST_OBJECT_UNLOCK (timecodestamper);
        return FALSE;
      }

      if (timecodestamper->first_tc_now && !timecodestamper->first_tc) {
        GDateTime *dt = g_date_time_new_now_local ();
        GstVideoTimeCode *tc;

        gst_timecodestamper_set_drop_frame (timecodestamper);

        tc = gst_video_time_code_new_from_date_time (timecodestamper->
            vinfo.fps_n, timecodestamper->vinfo.fps_d, dt,
            timecodestamper->current_tc->config.flags, 0);

        g_date_time_unref (dt);

        timecodestamper->first_tc = tc;
        notify = TRUE;
      }

      frames =
          gst_util_uint64_scale (segment.time, timecodestamper->vinfo.fps_n,
          timecodestamper->vinfo.fps_d * GST_SECOND);
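      /* e.g. at 30000/1001 fps a segment time of 2s yields
       * gst_util_uint64_scale (2 * GST_SECOND, 30000, 1001 * GST_SECOND) = 59
       * frames, the scale rounding down */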
      gst_timecodestamper_reset_timecode (timecodestamper);
      gst_video_time_code_add_frames (timecodestamper->current_tc, frames);
      GST_DEBUG_OBJECT (timecodestamper,
          "Got %" G_GUINT64_FORMAT " frames when segment time is %"
          GST_TIME_FORMAT, frames, GST_TIME_ARGS (segment.time));
      tc_str = gst_video_time_code_to_string (timecodestamper->current_tc);
      GST_DEBUG_OBJECT (timecodestamper, "New timecode is %s", tc_str);
      g_free (tc_str);
      GST_OBJECT_UNLOCK (timecodestamper);
      if (notify)
        g_object_notify (G_OBJECT (timecodestamper), "first-timecode");
      break;
    }
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      GST_OBJECT_LOCK (timecodestamper);
      gst_event_parse_caps (event, &caps);
      if (!gst_video_info_from_caps (&timecodestamper->vinfo, caps)) {
        GST_OBJECT_UNLOCK (timecodestamper);
        return FALSE;
      }
      gst_timecodestamper_reset_timecode (timecodestamper);
      GST_OBJECT_UNLOCK (timecodestamper);
      break;
    }
    default:
      break;
  }
  ret =
      GST_BASE_TRANSFORM_CLASS (gst_timecodestamper_parent_class)->sink_event
      (trans, event);
  return ret;
}
static gboolean
gst_video_rate_setcaps (GstBaseTransform * trans, GstCaps * in_caps,
    GstCaps * out_caps)
{
  GstVideoRate *videorate;
  GstStructure *structure;
  gboolean ret = TRUE;
  gint rate_numerator, rate_denominator;

  videorate = GST_VIDEO_RATE (trans);

  GST_DEBUG_OBJECT (trans, "setcaps called in: %" GST_PTR_FORMAT
      " out: %" GST_PTR_FORMAT, in_caps, out_caps);

  structure = gst_caps_get_structure (in_caps, 0);
  if (!gst_structure_get_fraction (structure, "framerate",
          &rate_numerator, &rate_denominator))
    goto no_framerate;

  videorate->from_rate_numerator = rate_numerator;
  videorate->from_rate_denominator = rate_denominator;

  structure = gst_caps_get_structure (out_caps, 0);
  if (!gst_structure_get_fraction (structure, "framerate",
          &rate_numerator, &rate_denominator))
    goto no_framerate;

  /* out_frame_count is scaled by the frame rate caps when calculating next_ts.
   * when the frame rate caps change, we must update base_ts and reset
   * out_frame_count */
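  /* e.g. with 100 frames already output at 25/1 fps, base_ts advances by
   * gst_util_uint64_scale (100, 1 * GST_SECOND, 25) = 4s before the frame
   * counter is reset for the new rate */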
  if (videorate->to_rate_numerator) {
    videorate->base_ts +=
        gst_util_uint64_scale (videorate->out_frame_count,
        videorate->to_rate_denominator * GST_SECOND,
        videorate->to_rate_numerator);
  }
  videorate->out_frame_count = 0;
  videorate->to_rate_numerator = rate_numerator;
  videorate->to_rate_denominator = rate_denominator;

  if (rate_numerator)
    videorate->wanted_diff = gst_util_uint64_scale_int (GST_SECOND,
        rate_denominator, rate_numerator);
  else
    videorate->wanted_diff = 0;

done:
  /* After a setcaps, our caps may have changed. In that case, we can't use
   * the old buffer, if there was one (it might have different dimensions) */
  GST_DEBUG_OBJECT (videorate, "swapping old buffers");
  gst_video_rate_swap_prev (videorate, NULL, GST_CLOCK_TIME_NONE);
  videorate->last_ts = GST_CLOCK_TIME_NONE;
  videorate->average = 0;

  return ret;

no_framerate:
  {
    GST_DEBUG_OBJECT (videorate, "no framerate specified");
    ret = FALSE;
    goto done;
  }
}
static gboolean
gst_video_rate_query (GstBaseTransform * trans, GstPadDirection direction,
    GstQuery * query)
{
  GstVideoRate *videorate = GST_VIDEO_RATE (trans);
  gboolean res = FALSE;
  GstPad *otherpad;

  otherpad = (direction == GST_PAD_SRC) ?
      GST_BASE_TRANSFORM_SINK_PAD (trans) : GST_BASE_TRANSFORM_SRC_PAD (trans);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_LATENCY:
    {
      GstClockTime min, max;
      gboolean live;
      guint64 latency;
      guint64 avg_period;
      GstPad *peer;

      GST_OBJECT_LOCK (videorate);
      avg_period = videorate->average_period_set;
      GST_OBJECT_UNLOCK (videorate);

      if (avg_period == 0 && (peer = gst_pad_get_peer (otherpad))) {
        if ((res = gst_pad_query (peer, query))) {
          gst_query_parse_latency (query, &live, &min, &max);

          GST_DEBUG_OBJECT (videorate, "Peer latency: min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          if (videorate->from_rate_numerator != 0) {
            /* Add our own latency. We don't really know, since we hold on to
             * each frame until the next one arrives, which can take any
             * amount of time. We assume, however, that it will take about one
             * input frame duration (from_rate). */
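            /* e.g. a 25/1 fps input adds
             * gst_util_uint64_scale (GST_SECOND, 1, 25) = 40ms of latency */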
            latency = gst_util_uint64_scale (GST_SECOND,
                videorate->from_rate_denominator,
                videorate->from_rate_numerator);
          } else {
            /* no input framerate, we don't know */
            latency = 0;
          }

          GST_DEBUG_OBJECT (videorate, "Our latency: %"
              GST_TIME_FORMAT, GST_TIME_ARGS (latency));

          min += latency;
          if (max != -1)
            max += latency;

          GST_DEBUG_OBJECT (videorate, "Calculated total latency : min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          gst_query_set_latency (query, live, min, max);
        }
        gst_object_unref (peer);
        break;
      }
      /* Simply fall through if we are using an average period, or have no
       * peer that we can ask about its latency yet. */
    }
    default:
      res = parent_class->query (trans, direction, query);
      break;
  }

  return res;
}
static void
gst_amc_video_dec_loop (GstAmcVideoDec * self)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstClockTimeDiff deadline;
  gboolean is_eos;
  GstAmcBuffer *buf;
  GstAmcBufferInfo buffer_info;
  gint idx;
  GError *err = NULL;

  GST_VIDEO_DECODER_STREAM_LOCK (self);

retry:
  /*if (self->input_state_changed) {
     idx = INFO_OUTPUT_FORMAT_CHANGED;
     } else { */
  GST_DEBUG_OBJECT (self, "Waiting for available output buffer");
  GST_VIDEO_DECODER_STREAM_UNLOCK (self);
  /* Wait at most 100ms here, some codecs don't fail dequeueing if
   * the codec is flushing, causing deadlocks during shutdown */
  idx =
      gst_amc_codec_dequeue_output_buffer (self->codec, &buffer_info, 100000,
      &err);
  GST_VIDEO_DECODER_STREAM_LOCK (self);
  /*} */

  if (idx < 0) {
    if (self->flushing) {
      g_clear_error (&err);
      goto flushing;
    }

    switch (idx) {
      case INFO_OUTPUT_BUFFERS_CHANGED:
        /* Handled internally */
        g_assert_not_reached ();
        break;
      case INFO_OUTPUT_FORMAT_CHANGED:{
        GstAmcFormat *format;
        gchar *format_string;

        GST_DEBUG_OBJECT (self, "Output format has changed");

        format = gst_amc_codec_get_output_format (self->codec, &err);
        if (!format)
          goto format_error;

        format_string = gst_amc_format_to_string (format, &err);
        if (!format_string) {
          gst_amc_format_free (format);
          goto format_error;
        }
        GST_DEBUG_OBJECT (self, "Got new output format: %s", format_string);
        g_free (format_string);

        if (!gst_amc_video_dec_set_src_caps (self, format)) {
          gst_amc_format_free (format);
          goto format_error;
        }
        gst_amc_format_free (format);

        goto retry;
      }
      case INFO_TRY_AGAIN_LATER:
        GST_DEBUG_OBJECT (self, "Dequeueing output buffer timed out");
        goto retry;
      case G_MININT:
        GST_ERROR_OBJECT (self, "Failure dequeueing output buffer");
        goto dequeue_error;
      default:
        g_assert_not_reached ();
        break;
    }

    goto retry;
  }

  GST_DEBUG_OBJECT (self,
      "Got output buffer at index %d: offset %d size %d time %" G_GINT64_FORMAT
      " flags 0x%08x", idx, buffer_info.offset, buffer_info.size,
      buffer_info.presentation_time_us, buffer_info.flags);

  frame =
      _find_nearest_frame (self,
      gst_util_uint64_scale (buffer_info.presentation_time_us, GST_USECOND, 1));
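  /* presentation_time_us is in microseconds; the scale by GST_USECOND
   * (1000ns) above converts it to a GstClockTime in nanoseconds */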

  is_eos = ! !(buffer_info.flags & BUFFER_FLAG_END_OF_STREAM);

  buf = gst_amc_codec_get_output_buffer (self->codec, idx, &err);
  if (!buf)
    goto failed_to_get_output_buffer;

  if (frame
      && (deadline =
          gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (self),
              frame)) < 0) {
    GST_WARNING_OBJECT (self,
        "Frame is too late, dropping (deadline %" GST_TIME_FORMAT ")",
        GST_TIME_ARGS (-deadline));
    flow_ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
  } else if (!frame && buffer_info.size > 0) {
    GstBuffer *outbuf;

    /* This sometimes happens at EOS or if the input is not properly framed,
     * let's handle it gracefully by allocating a new buffer for the current
     * caps and filling it
     */
    GST_ERROR_OBJECT (self, "No corresponding frame found");

    outbuf =
        gst_video_decoder_allocate_output_buffer (GST_VIDEO_DECODER (self));

    if (!gst_amc_video_dec_fill_buffer (self, buf, &buffer_info, outbuf)) {
      gst_buffer_unref (outbuf);
      if (!gst_amc_codec_release_output_buffer (self->codec, idx, &err))
        GST_ERROR_OBJECT (self, "Failed to release output buffer index %d",
            idx);
      if (err && !self->flushing)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      g_clear_error (&err);
      gst_amc_buffer_free (buf);
      buf = NULL;
      goto invalid_buffer;
    }

    GST_BUFFER_PTS (outbuf) =
        gst_util_uint64_scale (buffer_info.presentation_time_us, GST_USECOND,
        1);
    flow_ret = gst_pad_push (GST_VIDEO_DECODER_SRC_PAD (self), outbuf);
  } else if (buffer_info.size > 0) {
    if ((flow_ret = gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER
                (self), frame)) != GST_FLOW_OK) {
      GST_ERROR_OBJECT (self, "Failed to allocate buffer");
      if (!gst_amc_codec_release_output_buffer (self->codec, idx, &err))
        GST_ERROR_OBJECT (self, "Failed to release output buffer index %d",
            idx);
      if (err && !self->flushing)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      g_clear_error (&err);
      gst_amc_buffer_free (buf);
      buf = NULL;
      goto flow_error;
    }

    if (!gst_amc_video_dec_fill_buffer (self, buf, &buffer_info,
            frame->output_buffer)) {
      gst_buffer_replace (&frame->output_buffer, NULL);
      gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
      if (!gst_amc_codec_release_output_buffer (self->codec, idx, &err))
        GST_ERROR_OBJECT (self, "Failed to release output buffer index %d",
            idx);
      if (err && !self->flushing)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      g_clear_error (&err);
      gst_amc_buffer_free (buf);
      buf = NULL;
      goto invalid_buffer;
    }

    flow_ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
  } else if (frame != NULL) {
    flow_ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
  }

  gst_amc_buffer_free (buf);
  buf = NULL;

  if (!gst_amc_codec_release_output_buffer (self->codec, idx, &err)) {
    if (self->flushing) {
      g_clear_error (&err);
      goto flushing;
    }
    goto failed_release;
  }

  if (is_eos || flow_ret == GST_FLOW_EOS) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
    } else if (flow_ret == GST_FLOW_OK) {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);
    GST_VIDEO_DECODER_STREAM_LOCK (self);
  } else {
    GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));
  }

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_VIDEO_DECODER_STREAM_UNLOCK (self);

  return;

dequeue_error:
  {
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

format_error:
  {
    if (err)
      GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    else
      GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
          ("Failed to handle format"));
    gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }
failed_release:
  {
    GST_VIDEO_DECODER_ERROR_FROM_ERROR (self, err);
    gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }
flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    return;
  }

flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");
      gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    } else if (flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          ("Internal data stream error."), ("stream stopped, reason %s",
              gst_flow_get_name (flow_ret)));
      gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    } else if (flow_ret == GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
      gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    }
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

failed_to_get_output_buffer:
  {
    GST_VIDEO_DECODER_ERROR_FROM_ERROR (self, err);
    gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

invalid_buffer:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Invalid sized input buffer"));
    gst_pad_push_event (GST_VIDEO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_VIDEO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }
}
bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                       int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedVideoEos && !mVideoSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mVideoSinkBufferCount) {
      if (!mAudioSinkBufferCount) {
        /* We have nothing decoded, so it makes no sense to return to the
         * state machine: it would call us back immediately, we'd return
         * again, and so on, wasting CPU cycles for no work done. So, block
         * here until there is either video or audio data available
        */
        mon.Wait();
        if (!mVideoSinkBufferCount) {
          /* There is still no video data available, so either there is audio
           * data or something else has happened (EOS, etc...). Return to the
           * state machine to process it
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

    mDecoder->NotifyDecodedFrames(0, 1);

#if GST_VERSION_MAJOR >= 1
    GstSample *sample = gst_app_sink_pull_sample(mVideoAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mVideoAppSink);
#endif
    mVideoSinkBufferCount--;
  }

  /* The appsink can return a NULL buffer (e.g. at EOS); check before use */
  if (!buffer)
    /* no more frames */
    return true;

  bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  if ((aKeyFrameSkip && !isKeyframe)) {
    gst_buffer_unref(buffer);
    return true;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    timestamp = gst_segment_to_stream_time(&mVideoSegment,
                                           GST_FORMAT_TIME, timestamp);
  }
  NS_ASSERTION(GST_CLOCK_TIME_IS_VALID(timestamp),
               "frame has invalid timestamp");

  timestamp = GST_TIME_AS_USECONDS(timestamp);
  int64_t duration = 0;
  if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
    duration = GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
  else if (fpsNum && fpsDen)
    /* add 1-frame duration, in microseconds (timestamps here are µs) */
    duration = gst_util_uint64_scale(GST_SECOND / GST_USECOND, fpsDen, fpsNum);

  if (timestamp < aTimeThreshold) {
    LOG(PR_LOG_DEBUG, "skipping frame %" GST_TIME_FORMAT
                      " threshold %" GST_TIME_FORMAT,
                      GST_TIME_ARGS(timestamp * 1000),
                      GST_TIME_ARGS(aTimeThreshold * 1000));
    gst_buffer_unref(buffer);
    return true;
  }

#if GST_VERSION_MAJOR >= 1
  if (mConfigureAlignment && buffer->pool) {
    GstStructure *config = gst_buffer_pool_get_config(buffer->pool);
    GstVideoAlignment align;
    if (gst_buffer_pool_config_get_video_alignment(config, &align))
      gst_video_info_align(&mVideoInfo, &align);
    gst_structure_free(config);
    mConfigureAlignment = false;
  }
#endif

  nsRefPtr<PlanarYCbCrImage> image = GetImageFromBuffer(buffer);
  if (!image) {
    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fall back to
     * allocating a PlanarYCbCrImage-backed GstBuffer here and memcpy.
     */
    GstBuffer* tmp = nullptr;
    CopyIntoImageBuffer(buffer, &tmp, image);
    gst_buffer_unref(buffer);
    buffer = tmp;
  }

  int64_t offset = mDecoder->GetResource()->Tell(); // Estimate location in media.
  VideoData* video = VideoData::CreateFromImage(mInfo.mVideo,
                                                mDecoder->GetImageContainer(),
                                                offset, timestamp, duration,
                                                static_cast<Image*>(image.get()),
                                                isKeyframe, -1, mPicture);
  mVideoQueue.Push(video);

  gst_buffer_unref(buffer);

  return true;
}
static GstFlowReturn
gst_audio_aggregator_aggregate (GstAggregator * agg, gboolean timeout)
{
  /* Get all pads that have data for us and store them in a
   * new list.
   *
   * Calculate the current output offset/timestamp and
   * offset_end/timestamp_end. Allocate a silence buffer
   * for this and store it.
   *
   * For all pads:
   * 1) Once per input buffer (cached)
   *   1) Check discont (flag and timestamp with tolerance)
   *   2) If discont or new, resync. That means:
   *     1) Drop all start data of the buffer that comes before
   *        the current position/offset.
   *     2) Calculate the offset (output segment!) that the first
   *        frame of the input buffer corresponds to. Base this on
   *        the running time.
   *
   * 2) If the current pad's offset/offset_end overlaps with the output
   *    offset/offset_end, mix it at the appropriate position in the output
   *    buffer and advance the pad's position. Remember if this pad needs
   *    a new buffer to advance behind the output offset_end.
   *
   * 3) If we had no pad with a buffer, go EOS.
   *
   * 4) If we had at least one pad that did not advance behind output
   *    offset_end, let collected be called again for the current
   *    output offset/offset_end.
   */
  GstElement *element;
  GstAudioAggregator *aagg;
  GList *iter;
  GstFlowReturn ret;
  GstBuffer *outbuf = NULL;
  gint64 next_offset;
  gint64 next_timestamp;
  gint rate, bpf;
  gboolean dropped = FALSE;
  gboolean is_eos = TRUE;
  gboolean is_done = TRUE;
  guint blocksize;

  element = GST_ELEMENT (agg);
  aagg = GST_AUDIO_AGGREGATOR (agg);

  /* Sync pad properties to the stream time */
  gst_aggregator_iterate_sinkpads (agg,
      (GstAggregatorPadForeachFunc) GST_DEBUG_FUNCPTR (sync_pad_values), NULL);

  GST_AUDIO_AGGREGATOR_LOCK (aagg);
  GST_OBJECT_LOCK (agg);

  /* Update position from the segment start/stop if needed */
  if (agg->segment.position == -1) {
    if (agg->segment.rate > 0.0)
      agg->segment.position = agg->segment.start;
    else
      agg->segment.position = agg->segment.stop;
  }

  if (G_UNLIKELY (aagg->info.finfo->format == GST_AUDIO_FORMAT_UNKNOWN)) {
    if (timeout) {
      GST_DEBUG_OBJECT (aagg,
          "Got timeout before receiving any caps, don't output anything");

      /* Advance position */
      if (agg->segment.rate > 0.0)
        agg->segment.position += aagg->priv->output_buffer_duration;
      else if (agg->segment.position > aagg->priv->output_buffer_duration)
        agg->segment.position -= aagg->priv->output_buffer_duration;
      else
        agg->segment.position = 0;

      GST_OBJECT_UNLOCK (agg);
      GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
      return GST_FLOW_OK;
    } else {
      GST_OBJECT_UNLOCK (agg);
      goto not_negotiated;
    }
  }

  if (aagg->priv->send_caps) {
    GST_OBJECT_UNLOCK (agg);
    gst_aggregator_set_src_caps (agg, aagg->current_caps);
    GST_OBJECT_LOCK (agg);

    aagg->priv->send_caps = FALSE;
  }

  rate = GST_AUDIO_INFO_RATE (&aagg->info);
  bpf = GST_AUDIO_INFO_BPF (&aagg->info);

  if (aagg->priv->offset == -1) {
    aagg->priv->offset =
        gst_util_uint64_scale (agg->segment.position - agg->segment.start, rate,
        GST_SECOND);
    GST_DEBUG_OBJECT (aagg, "Starting at offset %" G_GINT64_FORMAT,
        aagg->priv->offset);
  }

  blocksize = gst_util_uint64_scale (aagg->priv->output_buffer_duration,
      rate, GST_SECOND);
  blocksize = MAX (1, blocksize);
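  /* e.g. a 10ms output_buffer_duration at 48000 Hz gives
   * gst_util_uint64_scale (10 * GST_MSECOND, 48000, GST_SECOND) = 480 samples */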

  /* for the next timestamp, use the sample counter, which will
   * never accumulate rounding errors */

  /* FIXME: Reverse mixing does not work at all yet */
  if (agg->segment.rate > 0.0) {
    next_offset = aagg->priv->offset + blocksize;
  } else {
    next_offset = aagg->priv->offset - blocksize;
  }

  next_timestamp =
      agg->segment.start + gst_util_uint64_scale (next_offset, GST_SECOND,
      rate);

  if (aagg->priv->current_buffer == NULL) {
    GST_OBJECT_UNLOCK (agg);
    aagg->priv->current_buffer =
        GST_AUDIO_AGGREGATOR_GET_CLASS (aagg)->create_output_buffer (aagg,
        blocksize);
    /* Careful: things may have changed while the object lock was released */
    GST_OBJECT_LOCK (agg);
    GST_BUFFER_FLAG_SET (aagg->priv->current_buffer, GST_BUFFER_FLAG_GAP);
  }
  outbuf = aagg->priv->current_buffer;

  GST_LOG_OBJECT (agg,
      "Starting to mix %u samples for offset %" G_GINT64_FORMAT
      " with timestamp %" GST_TIME_FORMAT, blocksize,
      aagg->priv->offset, GST_TIME_ARGS (agg->segment.position));

  for (iter = element->sinkpads; iter; iter = iter->next) {
    GstBuffer *inbuf;
    GstAudioAggregatorPad *pad = (GstAudioAggregatorPad *) iter->data;
    GstAggregatorPad *aggpad = (GstAggregatorPad *) iter->data;
    gboolean drop_buf = FALSE;
    gboolean pad_eos = gst_aggregator_pad_is_eos (aggpad);

    if (!pad_eos)
      is_eos = FALSE;

    inbuf = gst_aggregator_pad_get_buffer (aggpad);

    GST_OBJECT_LOCK (pad);
    if (!inbuf) {
      if (timeout) {
        if (pad->priv->output_offset < next_offset) {
          gint64 diff = next_offset - pad->priv->output_offset;
          GST_LOG_OBJECT (pad, "Timeout, missing %" G_GINT64_FORMAT " frames (%"
              GST_TIME_FORMAT ")", diff,
              GST_TIME_ARGS (gst_util_uint64_scale (diff, GST_SECOND,
                      GST_AUDIO_INFO_RATE (&aagg->info))));
        }
      } else if (!pad_eos) {
        is_done = FALSE;
      }
      GST_OBJECT_UNLOCK (pad);
      continue;
    }

    g_assert (!pad->priv->buffer || pad->priv->buffer == inbuf);

    /* New buffer? */
    if (!pad->priv->buffer) {
      /* Takes ownership of buffer */
      if (!gst_audio_aggregator_fill_buffer (aagg, pad, inbuf)) {
        dropped = TRUE;
        GST_OBJECT_UNLOCK (pad);
        gst_aggregator_pad_drop_buffer (aggpad);
        continue;
      }
    } else {
      gst_buffer_unref (inbuf);
    }

    if (!pad->priv->buffer && !dropped && pad_eos) {
      GST_DEBUG_OBJECT (aggpad, "Pad is in EOS state");
      GST_OBJECT_UNLOCK (pad);
      continue;
    }

    g_assert (pad->priv->buffer);

    /* This pad is lagging behind, we need to update its offset
     * and maybe drop the current buffer */
    if (pad->priv->output_offset < aagg->priv->offset) {
      gint64 diff = aagg->priv->offset - pad->priv->output_offset;
      gint64 odiff = diff;

      if (pad->priv->position + diff > pad->priv->size)
        diff = pad->priv->size - pad->priv->position;
      pad->priv->position += diff;
      pad->priv->output_offset += diff;

      if (pad->priv->position == pad->priv->size) {
        GST_LOG_OBJECT (pad, "Buffer was late by %" GST_TIME_FORMAT
            ", dropping %" GST_PTR_FORMAT,
            GST_TIME_ARGS (gst_util_uint64_scale (odiff, GST_SECOND,
                    GST_AUDIO_INFO_RATE (&aagg->info))), pad->priv->buffer);
        /* Buffer done, drop it */
        gst_buffer_replace (&pad->priv->buffer, NULL);
        dropped = TRUE;
        GST_OBJECT_UNLOCK (pad);
        gst_aggregator_pad_drop_buffer (aggpad);
        continue;
      }
    }


    if (pad->priv->output_offset >= aagg->priv->offset
        && pad->priv->output_offset <
        aagg->priv->offset + blocksize && pad->priv->buffer) {
      GST_LOG_OBJECT (aggpad, "Mixing buffer for current offset");
      drop_buf = !gst_audio_aggregator_mix_buffer (aagg, pad, pad->priv->buffer,
          outbuf);
      if (pad->priv->output_offset >= next_offset) {
        GST_DEBUG_OBJECT (pad,
            "Pad is after current offset: %" G_GUINT64_FORMAT " >= %"
            G_GINT64_FORMAT, pad->priv->output_offset, next_offset);
      } else {
        is_done = FALSE;
      }
    }

    GST_OBJECT_UNLOCK (pad);
    if (drop_buf)
      gst_aggregator_pad_drop_buffer (aggpad);

  }
  GST_OBJECT_UNLOCK (agg);

  if (dropped) {
    /* We dropped a buffer, retry */
    GST_INFO_OBJECT (aagg, "A pad dropped a buffer, wait for the next one");
    GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
    return GST_FLOW_OK;
  }

  if (!is_done && !is_eos) {
    /* Get more buffers */
    GST_INFO_OBJECT (aagg,
        "We're not done yet for the current offset," " waiting for more data");
    GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
    return GST_FLOW_OK;
  }

  if (is_eos) {
    gint64 max_offset = 0;

    GST_DEBUG_OBJECT (aagg, "We're EOS");

    GST_OBJECT_LOCK (agg);
    for (iter = GST_ELEMENT (agg)->sinkpads; iter; iter = iter->next) {
      GstAudioAggregatorPad *pad = GST_AUDIO_AGGREGATOR_PAD (iter->data);

      max_offset = MAX ((gint64) max_offset, (gint64) pad->priv->output_offset);
    }
    GST_OBJECT_UNLOCK (agg);

    /* This means EOS or nothing mixed in at all */
    if (aagg->priv->offset == max_offset) {
      gst_buffer_replace (&aagg->priv->current_buffer, NULL);
      GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
      return GST_FLOW_EOS;
    }

    if (max_offset <= next_offset) {
      GST_DEBUG_OBJECT (aagg,
          "Last buffer is incomplete: %" G_GUINT64_FORMAT " <= %"
          G_GINT64_FORMAT, max_offset, next_offset);
      next_offset = max_offset;
      next_timestamp =
          agg->segment.start + gst_util_uint64_scale (next_offset, GST_SECOND,
          rate);

      if (next_offset > aagg->priv->offset)
        gst_buffer_resize (outbuf, 0, (next_offset - aagg->priv->offset) * bpf);
    }
  }

  /* set timestamps on the output buffer */
  GST_OBJECT_LOCK (agg);
  if (agg->segment.rate > 0.0) {
    GST_BUFFER_PTS (outbuf) = agg->segment.position;
    GST_BUFFER_OFFSET (outbuf) = aagg->priv->offset;
    GST_BUFFER_OFFSET_END (outbuf) = next_offset;
    GST_BUFFER_DURATION (outbuf) = next_timestamp - agg->segment.position;
  } else {
    GST_BUFFER_PTS (outbuf) = next_timestamp;
    GST_BUFFER_OFFSET (outbuf) = next_offset;
    GST_BUFFER_OFFSET_END (outbuf) = aagg->priv->offset;
    GST_BUFFER_DURATION (outbuf) = agg->segment.position - next_timestamp;
  }

  GST_OBJECT_UNLOCK (agg);

  /* send it out */
  GST_LOG_OBJECT (aagg,
      "pushing outbuf %p, timestamp %" GST_TIME_FORMAT " offset %"
      G_GINT64_FORMAT, outbuf, GST_TIME_ARGS (GST_BUFFER_PTS (outbuf)),
      GST_BUFFER_OFFSET (outbuf));

  GST_AUDIO_AGGREGATOR_UNLOCK (aagg);

  ret = gst_aggregator_finish_buffer (agg, aagg->priv->current_buffer);
  aagg->priv->current_buffer = NULL;

  GST_LOG_OBJECT (aagg, "pushed outbuf, result = %s", gst_flow_get_name (ret));

  GST_AUDIO_AGGREGATOR_LOCK (aagg);
  GST_OBJECT_LOCK (agg);
  aagg->priv->offset = next_offset;
  agg->segment.position = next_timestamp;

  /* If there was a timeout and there was a gap in the data in one of the
   * streams, then it's a very good time to resync the timestamps.
   */
  if (timeout) {
    for (iter = element->sinkpads; iter; iter = iter->next) {
      GstAudioAggregatorPad *pad = GST_AUDIO_AGGREGATOR_PAD (iter->data);

      GST_OBJECT_LOCK (pad);
      if (pad->priv->output_offset < aagg->priv->offset)
        pad->priv->output_offset = -1;
      GST_OBJECT_UNLOCK (pad);
    }
  }
  GST_OBJECT_UNLOCK (agg);
  GST_AUDIO_AGGREGATOR_UNLOCK (aagg);

  return ret;
  /* ERRORS */
not_negotiated:
  {
    GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
    GST_ELEMENT_ERROR (aagg, STREAM, FORMAT, (NULL),
        ("Unknown data received, not negotiated"));
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
static GstFlowReturn
gst_amc_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstAmcVideoDec *self;
  gint idx;
  GstAmcBuffer *buf;
  GstAmcBufferInfo buffer_info;
  guint offset = 0;
  GstClockTime timestamp, duration, timestamp_offset = 0;
  GstMapInfo minfo;
  GError *err = NULL;

  memset (&minfo, 0, sizeof (minfo));

  self = GST_AMC_VIDEO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Handling frame");

  if (!self->started) {
    GST_ERROR_OBJECT (self, "Codec not started yet");
    gst_video_codec_frame_unref (frame);
    return GST_FLOW_NOT_NEGOTIATED;
  }

  if (self->flushing)
    goto flushing;

  if (self->downstream_flow_ret != GST_FLOW_OK)
    goto downstream_error;

  timestamp = frame->pts;
  duration = frame->duration;

  gst_buffer_map (frame->input_buffer, &minfo, GST_MAP_READ);

  while (offset < minfo.size) {
    /* Make sure to release the base class stream lock, otherwise
     * _loop() can't call _finish_frame() and we might block forever
     * because no input buffers are released */
    GST_VIDEO_DECODER_STREAM_UNLOCK (self);
    /* Wait at most 100ms here, some codecs don't fail dequeueing if
     * the codec is flushing, causing deadlocks during shutdown */
    idx = gst_amc_codec_dequeue_input_buffer (self->codec, 100000, &err);
    GST_VIDEO_DECODER_STREAM_LOCK (self);

    if (idx < 0) {
      if (self->flushing || self->downstream_flow_ret == GST_FLOW_FLUSHING) {
        g_clear_error (&err);
        goto flushing;
      }

      switch (idx) {
        case INFO_TRY_AGAIN_LATER:
          GST_DEBUG_OBJECT (self, "Dequeueing input buffer timed out");
          continue;             /* next try */
          break;
        case G_MININT:
          GST_ERROR_OBJECT (self, "Failed to dequeue input buffer");
          goto dequeue_error;
        default:
          g_assert_not_reached ();
          break;
      }

      continue;
    }

    if (self->flushing) {
      memset (&buffer_info, 0, sizeof (buffer_info));
      gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info, NULL);
      goto flushing;
    }

    if (self->downstream_flow_ret != GST_FLOW_OK) {
      memset (&buffer_info, 0, sizeof (buffer_info));
      gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info, &err);
      if (err && !self->flushing)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      g_clear_error (&err);
      goto downstream_error;
    }

    /* Now handle the frame */

    /* Copy the buffer content in chunks of size as requested
     * by the port */
    buf = gst_amc_codec_get_input_buffer (self->codec, idx, &err);
    if (!buf)
      goto failed_to_get_input_buffer;

    memset (&buffer_info, 0, sizeof (buffer_info));
    buffer_info.offset = 0;
    buffer_info.size = MIN (minfo.size - offset, buf->size);
    gst_amc_buffer_set_position_and_limit (buf, NULL, buffer_info.offset,
        buffer_info.size);

    orc_memcpy (buf->data, minfo.data + offset, buffer_info.size);

    gst_amc_buffer_free (buf);
    buf = NULL;

    /* Interpolate timestamps if we're passing the buffer
     * in multiple chunks */
    if (offset != 0 && duration != GST_CLOCK_TIME_NONE) {
      timestamp_offset = gst_util_uint64_scale (offset, duration, minfo.size);
    }
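    /* e.g. a 40ms frame pushed in two equal chunks gives the second chunk a
     * timestamp_offset of
     * gst_util_uint64_scale (size / 2, 40 * GST_MSECOND, size) = 20ms */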

    if (timestamp != GST_CLOCK_TIME_NONE) {
      buffer_info.presentation_time_us =
          gst_util_uint64_scale (timestamp + timestamp_offset, 1, GST_USECOND);
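      /* GST_USECOND is 1000ns, so this divides the nanosecond timestamp down
       * to the microseconds the codec API expects */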
      self->last_upstream_ts = timestamp + timestamp_offset;
    }
    if (duration != GST_CLOCK_TIME_NONE)
      self->last_upstream_ts += duration;

    if (offset == 0) {
      BufferIdentification *id =
          buffer_identification_new (timestamp + timestamp_offset);
      if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame))
        buffer_info.flags |= BUFFER_FLAG_SYNC_FRAME;
      gst_video_codec_frame_set_user_data (frame, id,
          (GDestroyNotify) buffer_identification_free);
    }

    offset += buffer_info.size;
    GST_DEBUG_OBJECT (self,
        "Queueing buffer %d: size %d time %" G_GINT64_FORMAT " flags 0x%08x",
        idx, buffer_info.size, buffer_info.presentation_time_us,
        buffer_info.flags);
    if (!gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info,
            &err)) {
      if (self->flushing) {
        g_clear_error (&err);
        goto flushing;
      }
      goto queue_error;
    }
    self->drained = FALSE;
  }

  gst_buffer_unmap (frame->input_buffer, &minfo);
  gst_video_codec_frame_unref (frame);

  return self->downstream_flow_ret;

downstream_error:
  {
    GST_ERROR_OBJECT (self, "Downstream returned %s",
        gst_flow_get_name (self->downstream_flow_ret));
    if (minfo.data)
      gst_buffer_unmap (frame->input_buffer, &minfo);
    gst_video_codec_frame_unref (frame);
    return self->downstream_flow_ret;
  }
failed_to_get_input_buffer:
  {
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    if (minfo.data)
      gst_buffer_unmap (frame->input_buffer, &minfo);
    gst_video_codec_frame_unref (frame);
    return GST_FLOW_ERROR;
  }
dequeue_error:
  {
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    if (minfo.data)
      gst_buffer_unmap (frame->input_buffer, &minfo);
    gst_video_codec_frame_unref (frame);
    return GST_FLOW_ERROR;
  }
queue_error:
  {
    GST_VIDEO_DECODER_ERROR_FROM_ERROR (self, err);
    if (minfo.data)
      gst_buffer_unmap (frame->input_buffer, &minfo);
    gst_video_codec_frame_unref (frame);
    return GST_FLOW_ERROR;
  }
flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- returning FLUSHING");
    if (minfo.data)
      gst_buffer_unmap (frame->input_buffer, &minfo);
    gst_video_codec_frame_unref (frame);
    return GST_FLOW_FLUSHING;
  }
}
static void
spc_play (GstPad * pad)
{
  GstSpcDec *spc = GST_SPC_DEC (gst_pad_get_parent (pad));
  GstFlowReturn flow_return;
  GstBuffer *out;
  gboolean seeking = spc->seeking;
  gint64 duration, fade, end, position;

  if (!seeking) {
    out = gst_buffer_new_and_alloc (1600 * 4);
    gst_buffer_set_caps (out, GST_PAD_CAPS (pad));
    GST_BUFFER_TIMESTAMP (out) =
        (gint64) gst_util_uint64_scale ((guint64) spc->byte_pos, GST_SECOND,
        32000 * 2 * 2);
    spc->byte_pos += OSPC_Run (-1, (short *) GST_BUFFER_DATA (out), 1600 * 4);
  } else {
    if (spc->seekpoint < spc->byte_pos) {
      OSPC_Init (GST_BUFFER_DATA (spc->buf), GST_BUFFER_SIZE (spc->buf));
      spc->byte_pos = 0;
    }
    spc->byte_pos += OSPC_Run (-1, NULL, 1600 * 4);
    if (spc->byte_pos >= spc->seekpoint) {
      spc->seeking = FALSE;
    }
    out = gst_buffer_new ();
    gst_buffer_set_caps (out, GST_PAD_CAPS (pad));
  }

  duration = gst_spc_duration (spc);
  fade = gst_spc_fadeout (spc);
  end = duration + fade;
  position =
      (gint64) gst_util_uint64_scale ((guint64) spc->byte_pos, GST_SECOND,
      32000 * 2 * 2);

  if (position >= duration) {
    gint16 *data = (gint16 *) GST_BUFFER_DATA (out);
    guint32 size = GST_BUFFER_SIZE (out) / sizeof (gint16);
    unsigned int i;

    gint64 num = (fade - (position - duration));
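    /* num falls linearly from fade to 0; squaring num / fade below makes the
     * envelope parabolic, e.g. halfway through the fade each sample is at
     * (1/2)^2 = 1/4 of its original amplitude */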

    for (i = 0; i < size; i++) {
      /* Apply a parabolic volume envelope */
      data[i] = (gint16) (data[i] * num / fade * num / fade);
    }
  }

  if ((flow_return = gst_pad_push (spc->srcpad, out)) != GST_FLOW_OK) {
    GST_DEBUG_OBJECT (spc, "pausing task, reason %s",
        gst_flow_get_name (flow_return));

    gst_pad_pause_task (pad);

    if (flow_return <= GST_FLOW_UNEXPECTED
        || flow_return == GST_FLOW_NOT_LINKED) {
      gst_pad_push_event (pad, gst_event_new_eos ());
    }
  }

  if (position >= end) {
    gst_pad_pause_task (pad);
    gst_pad_push_event (pad, gst_event_new_eos ());
  }

  gst_object_unref (spc);

  return;
}
static gboolean
gst_aiff_parse_pad_convert (GstPad * pad,
    GstFormat src_format, gint64 src_value,
    GstFormat * dest_format, gint64 * dest_value)
{
  GstAiffParse *aiffparse;
  gboolean res = TRUE;

  aiffparse = GST_AIFF_PARSE (GST_PAD_PARENT (pad));

  if (*dest_format == src_format) {
    *dest_value = src_value;
    return TRUE;
  }

  if (aiffparse->bytes_per_sample <= 0)
    return FALSE;

  GST_INFO_OBJECT (aiffparse, "converting value from %s to %s",
      gst_format_get_name (src_format), gst_format_get_name (*dest_format));

  switch (src_format) {
    case GST_FORMAT_BYTES:
      switch (*dest_format) {
        case GST_FORMAT_DEFAULT:
          *dest_value = src_value / aiffparse->bytes_per_sample;
          break;
        case GST_FORMAT_TIME:
          if (aiffparse->bps > 0) {
            *dest_value = gst_util_uint64_scale_ceil (src_value, GST_SECOND,
                (guint64) aiffparse->bps);
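            /* e.g. at bps = 176400 (44.1kHz, 2ch, 16-bit), 176400 bytes map
             * to exactly 1s; scale_ceil rounds partial nanoseconds up */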
            break;
          }
          /* Else fallthrough */
        default:
          res = FALSE;
          goto done;
      }
      break;

    case GST_FORMAT_DEFAULT:
      switch (*dest_format) {
        case GST_FORMAT_BYTES:
          *dest_value = src_value * aiffparse->bytes_per_sample;
          break;
        case GST_FORMAT_TIME:
          *dest_value = gst_util_uint64_scale (src_value, GST_SECOND,
              (guint64) aiffparse->rate);
          break;
        default:
          res = FALSE;
          goto done;
      }
      break;

    case GST_FORMAT_TIME:
      switch (*dest_format) {
        case GST_FORMAT_BYTES:
          if (aiffparse->bps > 0) {
            *dest_value = gst_util_uint64_scale (src_value,
                (guint64) aiffparse->bps, GST_SECOND);
            break;
          }
          /* Else: bps unknown, we cannot convert */
          res = FALSE;
          goto done;
        case GST_FORMAT_DEFAULT:
          *dest_value = gst_util_uint64_scale (src_value,
              (guint64) aiffparse->rate, GST_SECOND);
          break;
        default:
          res = FALSE;
          goto done;
      }
      break;

    default:
      res = FALSE;
      goto done;
  }

done:
  return res;

}
static gboolean
theora_parse_src_convert (GstPad * pad,
    GstFormat src_format, gint64 src_value,
    GstFormat * dest_format, gint64 * dest_value)
{
  gboolean res = TRUE;
  GstTheoraParse *parse;
  guint64 scale = 1;

  if (src_format == *dest_format) {
    *dest_value = src_value;
    return TRUE;
  }

  parse = GST_THEORA_PARSE (gst_pad_get_parent (pad));

  /* we need the info part before we can do anything */
  if (!parse->streamheader_received)
    goto no_header;

  switch (src_format) {
    case GST_FORMAT_BYTES:
      switch (*dest_format) {
        case GST_FORMAT_DEFAULT:
          *dest_value = gst_util_uint64_scale_int (src_value, 2,
              parse->info.pic_height * parse->info.pic_width * 3);
          break;
        case GST_FORMAT_TIME:
          /* seems like a rather silly conversion, implement me if you like */
        default:
          res = FALSE;
      }
      break;
    case GST_FORMAT_TIME:
      switch (*dest_format) {
        case GST_FORMAT_BYTES:
          scale = 3 * (parse->info.pic_width * parse->info.pic_height) / 2;
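          /* fallthrough: one I420 frame is width * height * 3 / 2 bytes, so
           * bytes = scale * frames */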
        case GST_FORMAT_DEFAULT:
          *dest_value = scale * gst_util_uint64_scale (src_value,
              parse->info.fps_numerator,
              parse->info.fps_denominator * GST_SECOND);
          break;
        default:
          GST_DEBUG_OBJECT (parse, "cannot convert to format %s",
              gst_format_get_name (*dest_format));
          res = FALSE;
      }
      break;
    case GST_FORMAT_DEFAULT:
      switch (*dest_format) {
        case GST_FORMAT_TIME:
          *dest_value = gst_util_uint64_scale (src_value,
              GST_SECOND * parse->info.fps_denominator,
              parse->info.fps_numerator);
          break;
        case GST_FORMAT_BYTES:
          *dest_value = gst_util_uint64_scale_int (src_value,
              3 * parse->info.pic_width * parse->info.pic_height, 2);
          break;
        default:
          res = FALSE;
      }
      break;
    default:
      res = FALSE;
  }
done:
  gst_object_unref (parse);
  return res;

  /* ERRORS */
no_header:
  {
    GST_DEBUG_OBJECT (parse, "no header yet, cannot convert");
    res = FALSE;
    goto done;
  }
}
static gboolean
gst_spc_dec_src_event (GstPad * pad, GstEvent * event)
{
  GstSpcDec *spc = GST_SPC_DEC (gst_pad_get_parent (pad));
  gboolean result = FALSE;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEEK:
    {
      gdouble rate;
      GstFormat format;
      GstSeekFlags flags;
      GstSeekType start_type, stop_type;
      gint64 start, stop;
      gboolean flush;

      gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
          &stop_type, &stop);

      if (format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (spc, "seeking is only supported in TIME format");
        break;
      }

      if (start_type != GST_SEEK_TYPE_SET || stop_type != GST_SEEK_TYPE_NONE) {
        GST_DEBUG_OBJECT (spc, "unsupported seek type");
        break;
      }

      if (stop_type == GST_SEEK_TYPE_NONE)
        stop = GST_CLOCK_TIME_NONE;

      if (start_type == GST_SEEK_TYPE_SET) {
        guint64 cur =
            gst_util_uint64_scale (spc->byte_pos, GST_SECOND, 32000 * 2 * 2);
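        /* 32000 Hz, 2 channels, 2 bytes per sample = 128000 bytes/s, so e.g.
         * byte_pos = 256000 corresponds to cur = 2s */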
        guint64 dest = (guint64) start;

        dest = CLAMP (dest, 0, gst_spc_duration (spc) + gst_spc_fadeout (spc));

        if (dest == cur)
          break;

        flush = (flags & GST_SEEK_FLAG_FLUSH) == GST_SEEK_FLAG_FLUSH;

        if (flush) {
          gst_pad_push_event (spc->srcpad, gst_event_new_flush_start ());
        } else {
          gst_pad_stop_task (spc->srcpad);
        }

        GST_PAD_STREAM_LOCK (spc->srcpad);

        if (flags & GST_SEEK_FLAG_SEGMENT) {
          gst_element_post_message (GST_ELEMENT (spc),
              gst_message_new_segment_start (GST_OBJECT (spc), format, cur));
        }

        if (flush) {
          gst_pad_push_event (spc->srcpad, gst_event_new_flush_stop ());
        }

        if (stop == GST_CLOCK_TIME_NONE)
          stop = (guint64) (gst_spc_duration (spc) + gst_spc_fadeout (spc));

        gst_pad_push_event (spc->srcpad, gst_event_new_new_segment (FALSE, rate,
                GST_FORMAT_TIME, dest, stop, dest));

        /* spc->byte_pos += OSPC_Run(-1, NULL, (unsigned int) (gst_util_uint64_scale(dest - cur, 32000*2*2, GST_SECOND))); */
        spc->seekpoint =
            gst_util_uint64_scale (dest, 32000 * 2 * 2, GST_SECOND);
        spc->seeking = TRUE;

        gst_pad_start_task (spc->srcpad, (GstTaskFunction) spc_play,
            spc->srcpad);

        GST_PAD_STREAM_UNLOCK (spc->srcpad);
        result = TRUE;
      }
      break;
    }
    default:
      break;
  }

  gst_event_unref (event);
  gst_object_unref (spc);

  return result;
}
static GstFlowReturn
gst_vp8_enc_pre_push (GstVideoEncoder * video_encoder,
    GstVideoCodecFrame * frame)
{
  GstVP8Enc *encoder;
  GstVPXEnc *vpx_enc;
  GstBuffer *buf;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVP8EncUserData *user_data = gst_video_codec_frame_get_user_data (frame);
  GList *l;
  gint inv_count;
  GstVideoInfo *info;

  GST_DEBUG_OBJECT (video_encoder, "pre_push");

  encoder = GST_VP8_ENC (video_encoder);
  vpx_enc = GST_VPX_ENC (encoder);

  info = &vpx_enc->input_state->info;

  g_assert (user_data != NULL);

  for (inv_count = 0, l = user_data->invisible; l; inv_count++, l = l->next) {
    buf = l->data;
    l->data = NULL;

    /* FIXME : All of this should have already been handled by base classes, no ? */
    if (l == user_data->invisible
        && GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
      GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
      encoder->keyframe_distance = 0;
    } else {
      GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
      encoder->keyframe_distance++;
    }

    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DECODE_ONLY);
    GST_BUFFER_TIMESTAMP (buf) = GST_BUFFER_TIMESTAMP (frame->output_buffer);
    GST_BUFFER_DURATION (buf) = 0;
    if (GST_VIDEO_INFO_FPS_D (info) == 0 || GST_VIDEO_INFO_FPS_N (info) == 0) {
      GST_BUFFER_OFFSET_END (buf) = GST_BUFFER_OFFSET_NONE;
      GST_BUFFER_OFFSET (buf) = GST_BUFFER_OFFSET_NONE;
    } else {
      GST_BUFFER_OFFSET_END (buf) =
          _to_granulepos (frame->presentation_frame_number + 1,
          inv_count, encoder->keyframe_distance);
      GST_BUFFER_OFFSET (buf) =
          gst_util_uint64_scale (frame->presentation_frame_number + 1,
          GST_SECOND * GST_VIDEO_INFO_FPS_D (info),
          GST_VIDEO_INFO_FPS_N (info));
    }

    ret = gst_pad_push (GST_VIDEO_ENCODER_SRC_PAD (video_encoder), buf);

    if (ret != GST_FLOW_OK) {
      GST_WARNING_OBJECT (encoder, "flow error %d", ret);
      goto done;
    }
  }

  buf = frame->output_buffer;

  /* FIXME : All of this should have already been handled by base classes, no ? */
  if (!user_data->invisible && GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
    GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
    encoder->keyframe_distance = 0;
  } else {
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
    encoder->keyframe_distance++;
  }

  if (GST_VIDEO_INFO_FPS_D (info) == 0 || GST_VIDEO_INFO_FPS_N (info) == 0) {
    GST_BUFFER_OFFSET_END (buf) = GST_BUFFER_OFFSET_NONE;
    GST_BUFFER_OFFSET (buf) = GST_BUFFER_OFFSET_NONE;
  } else {
    GST_BUFFER_OFFSET_END (buf) =
        _to_granulepos (frame->presentation_frame_number + 1, 0,
        encoder->keyframe_distance);
    GST_BUFFER_OFFSET (buf) =
        gst_util_uint64_scale (frame->presentation_frame_number + 1,
        GST_SECOND * GST_VIDEO_INFO_FPS_D (info), GST_VIDEO_INFO_FPS_N (info));
  }

  GST_LOG_OBJECT (video_encoder, "src ts: %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));

done:
  return ret;
}
static GstFlowReturn
gst_ffmpegenc_chain_audio (GstPad * pad, GstBuffer * inbuf)
{
  GstFFMpegEnc *ffmpegenc;
  GstFFMpegEncClass *oclass;
  AVCodecContext *ctx;
  GstClockTime timestamp, duration;
  guint size, frame_size;
  gint osize;
  GstFlowReturn ret;
  gint out_size;
  gboolean discont;
  guint8 *in_data;

  ffmpegenc = (GstFFMpegEnc *) (GST_OBJECT_PARENT (pad));
  oclass = (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);

  ctx = ffmpegenc->context;

  size = GST_BUFFER_SIZE (inbuf);
  timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  duration = GST_BUFFER_DURATION (inbuf);
  discont = GST_BUFFER_IS_DISCONT (inbuf);

  GST_DEBUG_OBJECT (ffmpegenc,
      "Received time %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
      ", size %d", GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration), size);

  frame_size = ctx->frame_size;
  osize = av_get_bits_per_sample_format (ctx->sample_fmt) / 8;

  if (frame_size > 1) {
    /* we have a frame_size, feed the encoder multiples of this frame size */
    guint avail, frame_bytes;

    if (discont) {
      GST_LOG_OBJECT (ffmpegenc, "DISCONT, clear adapter");
      gst_adapter_clear (ffmpegenc->adapter);
      ffmpegenc->discont = TRUE;
    }

    if (gst_adapter_available (ffmpegenc->adapter) == 0) {
      /* lock on to new timestamp */
      GST_LOG_OBJECT (ffmpegenc, "taking buffer timestamp %" GST_TIME_FORMAT,
          GST_TIME_ARGS (timestamp));
      ffmpegenc->adapter_ts = timestamp;
      ffmpegenc->adapter_consumed = 0;
    } else {
      GstClockTime upstream_time;
      guint64 bytes;

      /* use timestamp at head of the adapter */
      GST_LOG_OBJECT (ffmpegenc, "taking adapter timestamp %" GST_TIME_FORMAT,
          GST_TIME_ARGS (ffmpegenc->adapter_ts));
      timestamp = ffmpegenc->adapter_ts;
      timestamp +=
          gst_util_uint64_scale (ffmpegenc->adapter_consumed, GST_SECOND,
          ctx->sample_rate);
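      /* e.g. 4410 samples consumed at 44100 Hz advance the timestamp by
       * gst_util_uint64_scale (4410, GST_SECOND, 44100) = 100ms */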
      /* check with upstream timestamps, if too much deviation,
       * forego some timestamp perfection in favour of upstream syncing
       * (particularly in case these do not happen to come in multiple
       * of frame size) */
      upstream_time = gst_adapter_prev_timestamp (ffmpegenc->adapter, &bytes);
      if (GST_CLOCK_TIME_IS_VALID (upstream_time)) {
        GstClockTimeDiff diff;

        upstream_time +=
            gst_util_uint64_scale (bytes, GST_SECOND, ctx->sample_rate);
        diff = upstream_time - timestamp;
        /* relaxed difference, rather than half a sample or so ... */
        if (diff > GST_SECOND / 10 || diff < -GST_SECOND / 10) {
          GST_DEBUG_OBJECT (ffmpegenc, "adapter timestamp drifting, "
              "taking upstream timestamp %" GST_TIME_FORMAT,
              GST_TIME_ARGS (upstream_time));
          timestamp = upstream_time;
          ffmpegenc->adapter_ts = upstream_time -
              gst_util_uint64_scale (bytes, GST_SECOND, ctx->sample_rate);
          ffmpegenc->adapter_consumed = bytes;
          ffmpegenc->discont = TRUE;
        }
      }
    }

    GST_LOG_OBJECT (ffmpegenc, "pushing buffer in adapter");
    gst_adapter_push (ffmpegenc->adapter, inbuf);

    /* first see how many bytes we need to feed to the encoder. */
    frame_bytes = frame_size * osize * ctx->channels;
    avail = gst_adapter_available (ffmpegenc->adapter);

    GST_LOG_OBJECT (ffmpegenc, "frame_bytes %u, avail %u", frame_bytes, avail);

    /* while there is more than a frame size in the adapter, consume it */
    while (avail >= frame_bytes) {
      GST_LOG_OBJECT (ffmpegenc, "taking %u bytes from the adapter",
          frame_bytes);

      /* take an audio buffer out of the adapter */
      in_data = (guint8 *) gst_adapter_peek (ffmpegenc->adapter, frame_bytes);
      ffmpegenc->adapter_consumed += frame_size;

      /* calculate timestamp and duration relative to start of adapter and to
       * the amount of samples we consumed */
      duration =
          gst_util_uint64_scale (ffmpegenc->adapter_consumed, GST_SECOND,
          ctx->sample_rate);
      duration -= (timestamp - ffmpegenc->adapter_ts);

      /* 4 times the input size plus the minimal buffer size
       * should be big enough... */
      out_size = frame_bytes * 4 + FF_MIN_BUFFER_SIZE;

      ret = gst_ffmpegenc_encode_audio (ffmpegenc, in_data, out_size,
          timestamp, duration, ffmpegenc->discont);

      gst_adapter_flush (ffmpegenc->adapter, frame_bytes);

      if (ret != GST_FLOW_OK)
        goto push_failed;

      /* advance the adapter timestamp with the duration */
      timestamp += duration;

      ffmpegenc->discont = FALSE;
      avail = gst_adapter_available (ffmpegenc->adapter);
    }
    GST_LOG_OBJECT (ffmpegenc, "%u bytes left in the adapter", avail);
  } else {
    /* we have no frame_size, feed the encoder all the data and expect a fixed
     * output size */
    int coded_bps = av_get_bits_per_sample (oclass->in_plugin->id) / 8;

    GST_LOG_OBJECT (ffmpegenc, "coded bps %d, osize %d", coded_bps, osize);

    out_size = size / osize;
    if (coded_bps)
      out_size *= coded_bps;

    /* We need to provide at least ffmpeg's minimal buffer size */
    out_size += FF_MIN_BUFFER_SIZE;

    in_data = (guint8 *) GST_BUFFER_DATA (inbuf);
    ret = gst_ffmpegenc_encode_audio (ffmpegenc, in_data, out_size,
        timestamp, duration, discont);
    gst_buffer_unref (inbuf);

    if (ret != GST_FLOW_OK)
      goto push_failed;
  }

  return GST_FLOW_OK;

  /* ERRORS */
push_failed:
  {
    GST_DEBUG_OBJECT (ffmpegenc, "Failed to push buffer %d (%s)", ret,
        gst_flow_get_name (ret));
    return ret;
  }
}
static void
convert_to_internal_clock (GstDecklinkVideoSink * self,
    GstClockTime * timestamp, GstClockTime * duration)
{
  GstClock *clock, *audio_clock;

  g_assert (timestamp != NULL);

  clock = gst_element_get_clock (GST_ELEMENT_CAST (self));
  audio_clock = gst_decklink_output_get_audio_clock (self->output);
  if (clock && clock != self->output->clock && clock != audio_clock) {
    GstClockTime internal, external, rate_n, rate_d;
    gst_clock_get_calibration (self->output->clock, &internal, &external,
        &rate_n, &rate_d);

    if (self->internal_base_time != GST_CLOCK_TIME_NONE) {
      GstClockTime external_timestamp = *timestamp;
      GstClockTime base_time;

      // Convert to the running time corresponding to both clock times
      if (internal < self->internal_base_time)
        internal = 0;
      else
        internal -= self->internal_base_time;

      if (external < self->external_base_time)
        external = 0;
      else
        external -= self->external_base_time;

      // Convert timestamp to the "running time" since we started scheduled
      // playback, that is the difference between the pipeline's base time
      // and our own base time.
      base_time = gst_element_get_base_time (GST_ELEMENT_CAST (self));
      if (base_time > self->external_base_time)
        base_time = 0;
      else
        base_time = self->external_base_time - base_time;

      if (external_timestamp < base_time)
        external_timestamp = 0;
      else
        external_timestamp = external_timestamp - base_time;

      // Get the difference in the external time, note
      // that the running time is external time.
      // Then scale this difference and offset it to
      // our internal time. Now we have the running time
      // according to our internal clock.
      //
      // For the duration we just scale
      if (external > external_timestamp) {
        guint64 diff = external - external_timestamp;
        diff = gst_util_uint64_scale (diff, rate_d, rate_n);
        *timestamp = internal - diff;
      } else {
        guint64 diff = external_timestamp - external;
        diff = gst_util_uint64_scale (diff, rate_d, rate_n);
        *timestamp = internal + diff;
      }

      GST_LOG_OBJECT (self,
          "Converted %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " (internal: %"
          GST_TIME_FORMAT " external %" GST_TIME_FORMAT " rate: %lf)",
          GST_TIME_ARGS (external_timestamp), GST_TIME_ARGS (*timestamp),
          GST_TIME_ARGS (internal), GST_TIME_ARGS (external),
          ((gdouble) rate_n) / ((gdouble) rate_d));

      if (duration) {
        GstClockTime external_duration = *duration;

        *duration = gst_util_uint64_scale (external_duration, rate_d, rate_n);

        GST_LOG_OBJECT (self,
            "Converted duration %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
            " (internal: %" GST_TIME_FORMAT " external %" GST_TIME_FORMAT
            " rate: %lf)", GST_TIME_ARGS (external_duration),
            GST_TIME_ARGS (*duration), GST_TIME_ARGS (internal),
            GST_TIME_ARGS (external), ((gdouble) rate_n) / ((gdouble) rate_d));
      }
    } else {
      GST_LOG_OBJECT (self, "No clock conversion needed, not started yet");
    }
  } else {
    GST_LOG_OBJECT (self, "No clock conversion needed, same clocks");
  }
}
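/* Illustrative sketch (an assumed helper, not DeckLink API): the conversion
 * above is the inverse of GstClock slaving.  Given a calibration where
 * external = (internal - internal_base) * rate_n / rate_d + external_base,
 * a running time on the external clock maps back to the internal clock by
 * scaling the difference with rate_d / rate_n. */
#include <gst/gst.h>

static GstClockTime
external_to_internal (GstClockTime t, GstClockTime internal,
    GstClockTime external, guint64 rate_n, guint64 rate_d)
{
  if (external > t)
    return internal - gst_util_uint64_scale (external - t, rate_d, rate_n);
  else
    return internal + gst_util_uint64_scale (t - external, rate_d, rate_n);
}

int
main (void)
{
  /* calibration: internal 1 s <-> external 2 s, rate 1/1 */
  GstClockTime t = external_to_internal (2 * GST_SECOND + 40 * GST_MSECOND,
      GST_SECOND, 2 * GST_SECOND, 1, 1);

  g_print ("%" GST_TIME_FORMAT "\n", GST_TIME_ARGS (t)); /* 0:00:01.040000000 */
  return 0;
}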
static GstFlowReturn
gst_amc_audio_dec_handle_frame (GstAudioDecoder * decoder, GstBuffer * inbuf)
{
  GstAmcAudioDec *self;
  gint idx;
  GstAmcBuffer *buf;
  GstAmcBufferInfo buffer_info;
  guint offset = 0;
  GstClockTime timestamp, duration, timestamp_offset = 0;
  GstMapInfo minfo;

  memset (&minfo, 0, sizeof (minfo));

  self = GST_AMC_AUDIO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Handling frame");

  /* Make sure to keep a reference to the input here,
   * it can be unreffed from the other thread if
   * finish_frame() is called */
  if (inbuf)
    inbuf = gst_buffer_ref (inbuf);

  if (!self->started) {
    GST_ERROR_OBJECT (self, "Codec not started yet");
    if (inbuf)
      gst_buffer_unref (inbuf);
    return GST_FLOW_NOT_NEGOTIATED;
  }

  if (self->eos) {
    GST_WARNING_OBJECT (self, "Got frame after EOS");
    if (inbuf)
      gst_buffer_unref (inbuf);
    return GST_FLOW_EOS;
  }

  if (self->flushing)
    goto flushing;

  if (self->downstream_flow_ret != GST_FLOW_OK)
    goto downstream_error;

  if (!inbuf)
    return gst_amc_audio_dec_drain (self);

  timestamp = GST_BUFFER_PTS (inbuf);
  duration = GST_BUFFER_DURATION (inbuf);

  gst_buffer_map (inbuf, &minfo, GST_MAP_READ);

  while (offset < minfo.size) {
    /* Make sure to release the base class stream lock, otherwise
     * _loop() can't call _finish_frame() and we might block forever
     * because no input buffers are released */
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    /* Wait at most 100ms here, some codecs don't fail dequeueing if
     * the codec is flushing, causing deadlocks during shutdown */
    idx = gst_amc_codec_dequeue_input_buffer (self->codec, 100000);
    GST_AUDIO_DECODER_STREAM_LOCK (self);

    if (idx < 0) {
      if (self->flushing)
        goto flushing;
      switch (idx) {
        case INFO_TRY_AGAIN_LATER:
          GST_DEBUG_OBJECT (self, "Dequeueing input buffer timed out");
          continue;             /* next try */
          break;
        case G_MININT:
          GST_ERROR_OBJECT (self, "Failed to dequeue input buffer");
          goto dequeue_error;
        default:
          g_assert_not_reached ();
          break;
      }

      continue;
    }

    if (idx >= self->n_input_buffers)
      goto invalid_buffer_index;

    if (self->flushing)
      goto flushing;

    if (self->downstream_flow_ret != GST_FLOW_OK) {
      memset (&buffer_info, 0, sizeof (buffer_info));
      gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info);
      goto downstream_error;
    }

    /* Now handle the frame */

    /* Copy the buffer content in chunks of size as requested
     * by the port */
    buf = &self->input_buffers[idx];

    memset (&buffer_info, 0, sizeof (buffer_info));
    buffer_info.offset = 0;
    buffer_info.size = MIN (minfo.size - offset, buf->size);

    orc_memcpy (buf->data, minfo.data + offset, buffer_info.size);

    /* Interpolate timestamps if we're passing the buffer
     * in multiple chunks */
    if (offset != 0 && duration != GST_CLOCK_TIME_NONE) {
      timestamp_offset = gst_util_uint64_scale (offset, duration, minfo.size);
    }

    if (timestamp != GST_CLOCK_TIME_NONE) {
      buffer_info.presentation_time_us =
          gst_util_uint64_scale (timestamp + timestamp_offset, 1, GST_USECOND);
      self->last_upstream_ts = timestamp + timestamp_offset;
    }
    if (duration != GST_CLOCK_TIME_NONE)
      self->last_upstream_ts += duration;

    if (offset == 0) {
      if (!GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DELTA_UNIT))
        buffer_info.flags |= BUFFER_FLAG_SYNC_FRAME;
    }

    offset += buffer_info.size;
    GST_DEBUG_OBJECT (self,
        "Queueing buffer %d: size %d time %" G_GINT64_FORMAT " flags 0x%08x",
        idx, buffer_info.size, buffer_info.presentation_time_us,
        buffer_info.flags);
    if (!gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info))
      goto queue_error;
  }
  gst_buffer_unmap (inbuf, &minfo);
  gst_buffer_unref (inbuf);

  return self->downstream_flow_ret;

downstream_error:
  {
    GST_ERROR_OBJECT (self, "Downstream returned %s",
        gst_flow_get_name (self->downstream_flow_ret));
    if (minfo.data)
      gst_buffer_unmap (inbuf, &minfo);
    if (inbuf)
      gst_buffer_unref (inbuf);
    return self->downstream_flow_ret;
  }
invalid_buffer_index:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Invalid input buffer index %d of %d", idx, self->n_input_buffers));
    if (minfo.data)
      gst_buffer_unmap (inbuf, &minfo);
    if (inbuf)
      gst_buffer_unref (inbuf);
    return GST_FLOW_ERROR;
  }
dequeue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Failed to dequeue input buffer"));
    if (minfo.data)
      gst_buffer_unmap (inbuf, &minfo);
    if (inbuf)
      gst_buffer_unref (inbuf);
    return GST_FLOW_ERROR;
  }
queue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Failed to queue input buffer"));
    if (minfo.data)
      gst_buffer_unmap (inbuf, &minfo);
    if (inbuf)
      gst_buffer_unref (inbuf);
    return GST_FLOW_ERROR;
  }
flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- returning FLUSHING");
    if (minfo.data)
      gst_buffer_unmap (inbuf, &minfo);
    if (inbuf)
      gst_buffer_unref (inbuf);
    return GST_FLOW_FLUSHING;
  }
}
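/* Illustrative sketch of the chunked-timestamp interpolation above (buffer
 * size and duration are assumptions): when one input buffer is fed to the
 * codec in several chunks, each chunk's presentation time is offset in
 * proportion to how far into the buffer the chunk starts. */
#include <gst/gst.h>

int
main (void)
{
  const gsize size = 8192;                    /* whole input buffer */
  const GstClockTime duration = 20 * GST_MSECOND;
  gsize offset;

  for (offset = 0; offset < size; offset += 4096) {
    GstClockTime ts_off = gst_util_uint64_scale (offset, duration, size);

    g_print ("chunk at %" G_GSIZE_FORMAT ": +%" GST_TIME_FORMAT "\n",
        offset, GST_TIME_ARGS (ts_off));      /* +0 ms, then +10 ms */
  }
  return 0;
}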
static gboolean
gst_base_video_parse_src_query (GstPad * pad, GstQuery * query)
{
  GstBaseVideoParse *base_parse;
  gboolean res = FALSE;

  base_parse = GST_BASE_VIDEO_PARSE (gst_pad_get_parent (pad));

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 time;
      gint64 value;

      gst_query_parse_position (query, &format, NULL);

      time = gst_util_uint64_scale (base_parse->presentation_frame_number,
          base_parse->state.fps_d * GST_SECOND, base_parse->state.fps_n);
      time += base_parse->state.segment.time;
      GST_DEBUG ("query position %" G_GINT64_FORMAT, time);
      res = gst_base_video_encoded_video_convert (&base_parse->state,
          GST_FORMAT_TIME, time, &format, &value);
      if (!res)
        goto error;

      gst_query_set_position (query, format, value);
      break;
    }
    case GST_QUERY_DURATION:
      res =
          gst_pad_query (GST_PAD_PEER (GST_BASE_VIDEO_CODEC_SINK_PAD
              (base_parse)), query);
      if (!res)
        goto error;
      break;
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      GST_WARNING ("query convert");

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      res = gst_base_video_encoded_video_convert (&base_parse->state,
          src_fmt, src_val, &dest_fmt, &dest_val);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }
done:
  gst_object_unref (base_parse);

  return res;
error:
  GST_DEBUG_OBJECT (base_parse, "query failed");
  goto done;
}
static GstFlowReturn
gst_amc_audio_dec_drain (GstAmcAudioDec * self)
{
  GstFlowReturn ret;
  gint idx;

  GST_DEBUG_OBJECT (self, "Draining codec");
  if (!self->started) {
    GST_DEBUG_OBJECT (self, "Codec not started yet");
    return GST_FLOW_OK;
  }

  /* Don't send EOS buffer twice, this doesn't work */
  if (self->eos) {
    GST_DEBUG_OBJECT (self, "Codec is EOS already");
    return GST_FLOW_OK;
  }

  /* Make sure to release the base class stream lock, otherwise
   * _loop() can't call _finish_frame() and we might block forever
   * because no input buffers are released */
  GST_AUDIO_DECODER_STREAM_UNLOCK (self);
  /* Send an EOS buffer to the component and let the base
   * class drop the EOS event. We will send it later when
   * the EOS buffer arrives on the output port.
   * Wait at most 0.5s here. */
  idx = gst_amc_codec_dequeue_input_buffer (self->codec, 500000);
  GST_AUDIO_DECODER_STREAM_LOCK (self);

  if (idx >= 0 && idx < self->n_input_buffers) {
    GstAmcBufferInfo buffer_info;

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = TRUE;

    memset (&buffer_info, 0, sizeof (buffer_info));
    buffer_info.size = 0;
    buffer_info.presentation_time_us =
        gst_util_uint64_scale (self->last_upstream_ts, 1, GST_USECOND);
    buffer_info.flags |= BUFFER_FLAG_END_OF_STREAM;

    if (gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info)) {
      GST_DEBUG_OBJECT (self, "Waiting until codec is drained");
      g_cond_wait (&self->drain_cond, &self->drain_lock);
      GST_DEBUG_OBJECT (self, "Drained codec");
      ret = GST_FLOW_OK;
    } else {
      GST_ERROR_OBJECT (self, "Failed to queue input buffer");
      ret = GST_FLOW_ERROR;
    }

    g_mutex_unlock (&self->drain_lock);
    GST_AUDIO_DECODER_STREAM_LOCK (self);
  } else if (idx >= self->n_input_buffers) {
    GST_ERROR_OBJECT (self, "Invalid input buffer index %d of %d",
        idx, self->n_input_buffers);
    ret = GST_FLOW_ERROR;
  } else {
    GST_ERROR_OBJECT (self, "Failed to acquire buffer for EOS: %d", idx);
    ret = GST_FLOW_ERROR;
  }

  return ret;
}
GstFlowReturn
gst_base_video_parse_finish_frame (GstBaseVideoParse * base_video_parse)
{
  GstVideoFrame *frame = base_video_parse->current_frame;
  GstBuffer *buffer;
  GstBaseVideoParseClass *base_video_parse_class;
  GstFlowReturn ret;

  GST_DEBUG ("finish_frame");

  base_video_parse_class = GST_BASE_VIDEO_PARSE_GET_CLASS (base_video_parse);

  buffer = gst_adapter_take_buffer (base_video_parse->output_adapter,
      gst_adapter_available (base_video_parse->output_adapter));

  if (frame->is_sync_point) {
    base_video_parse->timestamp_offset = base_video_parse->last_timestamp -
        gst_util_uint64_scale (frame->presentation_frame_number,
        base_video_parse->state.fps_d * GST_SECOND,
        base_video_parse->state.fps_n);
    base_video_parse->distance_from_sync = 0;
  }

  frame->distance_from_sync = base_video_parse->distance_from_sync;
  base_video_parse->distance_from_sync++;

  frame->presentation_timestamp =
      gst_base_video_parse_get_timestamp (base_video_parse,
      frame->presentation_frame_number);
  frame->presentation_duration =
      gst_base_video_parse_get_timestamp (base_video_parse,
      frame->presentation_frame_number + 1) - frame->presentation_timestamp;
  frame->decode_timestamp =
      gst_base_video_parse_get_timestamp (base_video_parse,
      frame->decode_frame_number);

  GST_BUFFER_TIMESTAMP (buffer) = frame->presentation_timestamp;
  GST_BUFFER_DURATION (buffer) = frame->presentation_duration;
  if (frame->decode_frame_number < 0) {
    GST_BUFFER_OFFSET (buffer) = 0;
  } else {
    GST_BUFFER_OFFSET (buffer) = frame->decode_timestamp;
  }
  GST_BUFFER_OFFSET_END (buffer) = GST_CLOCK_TIME_NONE;

  GST_DEBUG ("pts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (frame->presentation_timestamp));
  GST_DEBUG ("dts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->decode_timestamp));
  GST_DEBUG ("dist %d", frame->distance_from_sync);

  if (frame->is_sync_point) {
    GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  } else {
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }

  frame->src_buffer = buffer;
  ret = base_video_parse_class->shape_output (base_video_parse, frame);

  gst_base_video_parse_free_frame (base_video_parse->current_frame);

  /* create new frame */
  base_video_parse->current_frame =
      gst_base_video_parse_new_frame (base_video_parse);

  return ret;
}
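/* Illustrative sketch (an assumed helper mirroring the offset arithmetic
 * above, not the actual gst_base_video_parse_get_timestamp()): once
 * timestamp_offset has been anchored at a sync point, every frame timestamp
 * is the frame number scaled by the frame duration plus that offset. */
#include <gst/gst.h>

static GstClockTime
parse_get_timestamp (GstClockTime timestamp_offset, gint fps_n, gint fps_d,
    gint frame_number)
{
  if (frame_number < 0)
    return GST_CLOCK_TIME_NONE;

  /* offset + frame_number * (fps_d / fps_n) seconds */
  return timestamp_offset +
      gst_util_uint64_scale (frame_number, fps_d * GST_SECOND, fps_n);
}

int
main (void)
{
  /* 30 fps, offset anchored at 1 s: frame 3 lands at 1.1 s */
  g_print ("%" GST_TIME_FORMAT "\n",
      GST_TIME_ARGS (parse_get_timestamp (GST_SECOND, 30, 1, 3)));
  return 0;
}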
/* Called with the object lock for both the element and pad held,
 * as well as the aagg lock
 */
static gboolean
gst_audio_aggregator_fill_buffer (GstAudioAggregator * aagg,
    GstAudioAggregatorPad * pad, GstBuffer * inbuf)
{
  GstClockTime start_time, end_time;
  gboolean discont = FALSE;
  guint64 start_offset, end_offset;
  gint rate, bpf;

  GstAggregator *agg = GST_AGGREGATOR (aagg);
  GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);

  g_assert (pad->priv->buffer == NULL);

  rate = GST_AUDIO_INFO_RATE (&pad->info);
  bpf = GST_AUDIO_INFO_BPF (&pad->info);

  pad->priv->position = 0;
  pad->priv->size = gst_buffer_get_size (inbuf) / bpf;

  if (!GST_BUFFER_PTS_IS_VALID (inbuf)) {
    if (pad->priv->output_offset == -1)
      pad->priv->output_offset = aagg->priv->offset;
    if (pad->priv->next_offset == -1)
      pad->priv->next_offset = pad->priv->size;
    else
      pad->priv->next_offset += pad->priv->size;
    goto done;
  }

  start_time = GST_BUFFER_PTS (inbuf);
  end_time =
      start_time + gst_util_uint64_scale_ceil (pad->priv->size, GST_SECOND,
      rate);

  /* Clipping should've ensured this */
  g_assert (start_time >= aggpad->segment.start);

  start_offset =
      gst_util_uint64_scale (start_time - aggpad->segment.start, rate,
      GST_SECOND);
  end_offset = start_offset + pad->priv->size;

  if (GST_BUFFER_IS_DISCONT (inbuf)
      || GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_RESYNC)
      || pad->priv->new_segment || pad->priv->next_offset == -1) {
    discont = TRUE;
    pad->priv->new_segment = FALSE;
  } else {
    guint64 diff, max_sample_diff;

    /* Check discont, based on audiobasesink */
    if (start_offset <= pad->priv->next_offset)
      diff = pad->priv->next_offset - start_offset;
    else
      diff = start_offset - pad->priv->next_offset;

    max_sample_diff =
        gst_util_uint64_scale_int (aagg->priv->alignment_threshold, rate,
        GST_SECOND);

    /* Discont! */
    if (G_UNLIKELY (diff >= max_sample_diff)) {
      if (aagg->priv->discont_wait > 0) {
        if (pad->priv->discont_time == GST_CLOCK_TIME_NONE) {
          pad->priv->discont_time = start_time;
        } else if (start_time - pad->priv->discont_time >=
            aagg->priv->discont_wait) {
          discont = TRUE;
          pad->priv->discont_time = GST_CLOCK_TIME_NONE;
        }
      } else {
        discont = TRUE;
      }
    } else if (G_UNLIKELY (pad->priv->discont_time != GST_CLOCK_TIME_NONE)) {
      /* we have had a discont, but are now back on track! */
      pad->priv->discont_time = GST_CLOCK_TIME_NONE;
    }
  }

  if (discont) {
    /* Have discont, need resync */
    if (pad->priv->next_offset != -1)
      GST_INFO_OBJECT (pad, "Have discont. Expected %"
          G_GUINT64_FORMAT ", got %" G_GUINT64_FORMAT,
          pad->priv->next_offset, start_offset);
    pad->priv->output_offset = -1;
    pad->priv->next_offset = end_offset;
  } else {
    pad->priv->next_offset += pad->priv->size;
  }

  if (pad->priv->output_offset == -1) {
    GstClockTime start_running_time;
    GstClockTime end_running_time;
    guint64 start_output_offset;
    guint64 end_output_offset;

    start_running_time =
        gst_segment_to_running_time (&aggpad->segment,
        GST_FORMAT_TIME, start_time);
    end_running_time =
        gst_segment_to_running_time (&aggpad->segment,
        GST_FORMAT_TIME, end_time);

    /* Convert to position in the output segment */
    start_output_offset =
        gst_segment_to_position (&agg->segment, GST_FORMAT_TIME,
        start_running_time);
    if (start_output_offset != -1)
      start_output_offset =
          gst_util_uint64_scale (start_output_offset - agg->segment.start, rate,
          GST_SECOND);

    end_output_offset =
        gst_segment_to_position (&agg->segment, GST_FORMAT_TIME,
        end_running_time);
    if (end_output_offset != -1)
      end_output_offset =
          gst_util_uint64_scale (end_output_offset - agg->segment.start, rate,
          GST_SECOND);

    if (start_output_offset == -1 && end_output_offset == -1) {
      /* Outside output segment, drop */
      gst_buffer_unref (inbuf);
      pad->priv->buffer = NULL;
      pad->priv->position = 0;
      pad->priv->size = 0;
      pad->priv->output_offset = -1;
      GST_DEBUG_OBJECT (pad, "Buffer outside output segment");
      return FALSE;
    }

    /* Calculate end_output_offset if it was outside the output segment */
    if (end_output_offset == -1)
      end_output_offset = start_output_offset + pad->priv->size;

    if (end_output_offset < aagg->priv->offset) {
      /* Before output segment, drop */
      gst_buffer_unref (inbuf);
      pad->priv->buffer = NULL;
      pad->priv->position = 0;
      pad->priv->size = 0;
      pad->priv->output_offset = -1;
      GST_DEBUG_OBJECT (pad,
          "Buffer before segment or current position: %" G_GUINT64_FORMAT " < %"
          G_GINT64_FORMAT, end_output_offset, aagg->priv->offset);
      return FALSE;
    }

    if (start_output_offset == -1 || start_output_offset < aagg->priv->offset) {
      guint diff;

      if (start_output_offset == -1 && end_output_offset < pad->priv->size) {
        diff = pad->priv->size - end_output_offset + aagg->priv->offset;
      } else if (start_output_offset == -1) {
        start_output_offset = end_output_offset - pad->priv->size;

        if (start_output_offset < aagg->priv->offset)
          diff = aagg->priv->offset - start_output_offset;
        else
          diff = 0;
      } else {
        diff = aagg->priv->offset - start_output_offset;
      }

      pad->priv->position += diff;
      if (pad->priv->position >= pad->priv->size) {
        /* Empty buffer, drop */
        gst_buffer_unref (inbuf);
        pad->priv->buffer = NULL;
        pad->priv->position = 0;
        pad->priv->size = 0;
        pad->priv->output_offset = -1;
        GST_DEBUG_OBJECT (pad,
            "Buffer before segment or current position: %" G_GUINT64_FORMAT
            " < %" G_GINT64_FORMAT, end_output_offset, aagg->priv->offset);
        return FALSE;
      }
    }

    if (start_output_offset == -1 || start_output_offset < aagg->priv->offset)
      pad->priv->output_offset = aagg->priv->offset;
    else
      pad->priv->output_offset = start_output_offset;

    GST_DEBUG_OBJECT (pad,
        "Buffer resynced: Pad offset %" G_GUINT64_FORMAT
        ", current audio aggregator offset %" G_GINT64_FORMAT,
        pad->priv->output_offset, aagg->priv->offset);
  }

done:

  GST_LOG_OBJECT (pad,
      "Queued new buffer at offset %" G_GUINT64_FORMAT,
      pad->priv->output_offset);
  pad->priv->buffer = inbuf;

  return TRUE;
}
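/* Illustrative numbers for the discont check above (rate and threshold are
 * assumptions): at 48 kHz with a 40 ms alignment threshold, up to 1920
 * samples of drift are tolerated before a buffer counts as a
 * discontinuity. */
#include <gst/gst.h>

int
main (void)
{
  const gint rate = 48000;
  const GstClockTime alignment_threshold = 40 * GST_MSECOND;
  guint64 max_sample_diff;

  max_sample_diff =
      gst_util_uint64_scale_int (alignment_threshold, rate, GST_SECOND);
  g_print ("tolerated drift: %" G_GUINT64_FORMAT " samples\n",
      max_sample_diff);         /* prints 1920 */
  return 0;
}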
Example #20
static gboolean
get_video_recv_info (KmsRembLocal * rl,
    guint64 * bitrate, guint * fraction_lost)
{
  GValueArray *arr = NULL;
  GValue *val;
  guint i;
  gboolean ret = FALSE;

  if (!KMS_REMB_BASE (rl)->rtpsess) {
    GST_WARNING ("Session object does not exist");
    return ret;
  }

  g_object_get (KMS_REMB_BASE (rl)->rtpsess, "sources", &arr, NULL);

  if (arr == NULL) {
    GST_WARNING ("Sources array not found");
    return ret;
  }

  for (i = 0; i < arr->n_values; i++) {
    GObject *source;
    guint ssrc;
    GstStructure *s;

    val = g_value_array_get_nth (arr, i);
    source = g_value_get_object (val);
    g_object_get (source, "ssrc", &ssrc, "stats", &s, NULL);

    GST_TRACE_OBJECT (source, "source ssrc: %u", ssrc);
    GST_TRACE_OBJECT (KMS_REMB_BASE (rl)->rtpsess, "stats: %" GST_PTR_FORMAT,
        s);

    if (ssrc == rl->remote_ssrc) {
      GstClockTime current_time;
      guint64 octets_received;

      if (!gst_structure_get_uint64 (s, "bitrate", bitrate)) {
        break;
      }
      if (!gst_structure_get_uint64 (s, "octets-received", &octets_received)) {
        break;
      }
      if (!gst_structure_get_uint (s, "sent-rb-fractionlost", fraction_lost)) {
        break;
      }

      current_time = kms_utils_get_time_nsecs ();

      if (rl->last_time != 0) {
        GstClockTime elapsed = current_time - rl->last_time;
        guint64 bytes_handled = octets_received - rl->last_octets_received;

        *bitrate =
            gst_util_uint64_scale (bytes_handled, 8 * GST_SECOND, elapsed);
        GST_TRACE_OBJECT (KMS_REMB_BASE (rl)->rtpsess,
            "Elapsed %" G_GUINT64_FORMAT " bytes %" G_GUINT64_FORMAT ", rate %"
            G_GUINT64_FORMAT, elapsed, bytes_handled, *bitrate);
      }

      rl->last_time = current_time;
      rl->last_octets_received = octets_received;

      ret = TRUE;
    }

    gst_structure_free (s);

    if (ret) {
      break;
    }
  }

  g_value_array_free (arr);

  return ret;
}
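/* Illustrative sketch of the rate computation above (byte count and elapsed
 * time are assumptions): bitrate = bytes * 8 / elapsed, computed with
 * gst_util_uint64_scale to stay within 64-bit integer arithmetic. */
#include <gst/gst.h>

int
main (void)
{
  GstClockTime elapsed = 500 * GST_MSECOND;   /* time between stats samples */
  guint64 bytes_handled = 62500;              /* octets received meanwhile */
  guint64 bitrate;

  bitrate = gst_util_uint64_scale (bytes_handled, 8 * GST_SECOND, elapsed);
  g_print ("%" G_GUINT64_FORMAT " bps\n", bitrate);   /* 1000000 bps */
  return 0;
}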
static GstFlowReturn
gst_amc_video_dec_drain (GstAmcVideoDec * self)
{
  GstFlowReturn ret;
  gint idx;
  GError *err = NULL;

  GST_DEBUG_OBJECT (self, "Draining codec");
  if (!self->started) {
    GST_DEBUG_OBJECT (self, "Codec not started yet");
    return GST_FLOW_OK;
  }

  /* Don't send drain buffer twice, this doesn't work */
  if (self->drained) {
    GST_DEBUG_OBJECT (self, "Codec is drained already");
    return GST_FLOW_OK;
  }

  /* Make sure to release the base class stream lock, otherwise
   * _loop() can't call _finish_frame() and we might block forever
   * because no input buffers are released */
  GST_VIDEO_DECODER_STREAM_UNLOCK (self);
  /* Send an EOS buffer to the component and let the base
   * class drop the EOS event. We will send it later when
   * the EOS buffer arrives on the output port.
   * Wait at most 0.5s here. */
  idx = gst_amc_codec_dequeue_input_buffer (self->codec, 500000, &err);
  GST_VIDEO_DECODER_STREAM_LOCK (self);

  if (idx >= 0) {
    GstAmcBuffer *buf;
    GstAmcBufferInfo buffer_info;

    buf = gst_amc_codec_get_input_buffer (self->codec, idx, &err);
    if (buf) {
      GST_VIDEO_DECODER_STREAM_UNLOCK (self);
      g_mutex_lock (&self->drain_lock);
      self->draining = TRUE;

      memset (&buffer_info, 0, sizeof (buffer_info));
      buffer_info.size = 0;
      buffer_info.presentation_time_us =
          gst_util_uint64_scale (self->last_upstream_ts, 1, GST_USECOND);
      buffer_info.flags |= BUFFER_FLAG_END_OF_STREAM;

      gst_amc_buffer_set_position_and_limit (buf, NULL, 0, 0);
      gst_amc_buffer_free (buf);
      buf = NULL;

      if (gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info,
              &err)) {
        GST_DEBUG_OBJECT (self, "Waiting until codec is drained");
        g_cond_wait (&self->drain_cond, &self->drain_lock);
        GST_DEBUG_OBJECT (self, "Drained codec");
        ret = GST_FLOW_OK;
      } else {
        GST_ERROR_OBJECT (self, "Failed to queue input buffer");
        if (self->flushing) {
          g_clear_error (&err);
          ret = GST_FLOW_FLUSHING;
        } else {
          GST_ELEMENT_WARNING_FROM_ERROR (self, err);
          ret = GST_FLOW_ERROR;
        }
      }

      self->drained = TRUE;
      self->draining = FALSE;
      g_mutex_unlock (&self->drain_lock);
      GST_VIDEO_DECODER_STREAM_LOCK (self);
    } else {
      GST_ERROR_OBJECT (self, "Failed to get buffer for EOS: %d", idx);
      if (err)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      ret = GST_FLOW_ERROR;
    }
  } else {
    GST_ERROR_OBJECT (self, "Failed to acquire buffer for EOS: %d", idx);
    if (err)
      GST_ELEMENT_WARNING_FROM_ERROR (self, err);
    ret = GST_FLOW_ERROR;
  }

  return ret;
}
static GstFlowReturn
gst_video_segment_clip_clip_buffer (GstSegmentClip * base, GstBuffer * buffer,
    GstBuffer ** outbuf)
{
  GstVideoSegmentClip *self = GST_VIDEO_SEGMENT_CLIP (base);
  GstSegment *segment = &base->segment;
  GstClockTime timestamp, duration;
  guint64 cstart, cstop;
  gboolean in_seg;

  if (!self->fps_d) {
    GST_ERROR_OBJECT (self, "Not negotiated yet");
    gst_buffer_unref (buffer);
    return GST_FLOW_NOT_NEGOTIATED;
  }

  if (segment->format != GST_FORMAT_TIME) {
    GST_DEBUG_OBJECT (self, "Unsupported segment format %s",
        gst_format_get_name (segment->format));
    *outbuf = buffer;
    return GST_FLOW_OK;
  }

  if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) {
    GST_WARNING_OBJECT (self, "Buffer without valid timestamp");
    *outbuf = buffer;
    return GST_FLOW_OK;
  }

  if (self->fps_n == 0) {
    *outbuf = buffer;
    return GST_FLOW_OK;
  }

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);
  if (!GST_CLOCK_TIME_IS_VALID (duration))
    duration = gst_util_uint64_scale (GST_SECOND, self->fps_d, self->fps_n);

  in_seg =
      gst_segment_clip (segment, GST_FORMAT_TIME, timestamp,
      timestamp + duration, &cstart, &cstop);
  if (in_seg) {
    if (timestamp != cstart || timestamp + duration != cstop) {
      *outbuf = gst_buffer_make_writable (buffer);

      GST_BUFFER_TIMESTAMP (*outbuf) = cstart;
      GST_BUFFER_DURATION (*outbuf) = cstop - cstart;
    } else {
      *outbuf = buffer;
    }
  } else {
    GST_DEBUG_OBJECT (self, "Buffer outside the configured segment");

    if (segment->rate >= 0) {
      if (segment->stop != -1 && timestamp >= segment->stop)
        return GST_FLOW_EOS;
    } else {
      if (segment->start != -1 && timestamp + duration <= segment->start)
        return GST_FLOW_EOS;
    }
    gst_buffer_unref (buffer);
  }

  return GST_FLOW_OK;
}
static GstCaps *
gst_mfxpostproc_transform_caps_impl (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps)
{
  GstMfxPostproc *const vpp = GST_MFXPOSTPROC (trans);
  GstVideoInfo vi, peer_vi;
  GstVideoFormat out_format;
  GstCaps *out_caps, *peer_caps;
  GstMfxCapsFeature feature;
  const gchar *feature_str;
  guint width, height;

  /* Generate the sink pad caps, that could be fixated afterwards */
  if (direction == GST_PAD_SRC) {
    if (!ensure_allowed_sinkpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_sinkpad_caps);
  }

  /* Generate complete set of src pad caps if non-fixated sink pad
   * caps are provided */
  if (!gst_caps_is_fixed (caps)) {
    if (!ensure_allowed_srcpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_srcpad_caps);
  }

  /* Generate the expected src pad caps, from the current fixated
   * sink pad caps */
  if (!gst_video_info_from_caps (&vi, caps))
    return NULL;

  if (vpp->deinterlace_mode)
    GST_VIDEO_INFO_INTERLACE_MODE (&vi) = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  /* Update size from user-specified parameters */
  find_best_size (vpp, &vi, &width, &height);

  /* Update format from user-specified parameters */
  peer_caps = gst_pad_peer_query_caps (GST_BASE_TRANSFORM_SRC_PAD (trans),
      vpp->allowed_srcpad_caps);

  if (gst_caps_is_any (peer_caps) || gst_caps_is_empty (peer_caps))
    return peer_caps;
  if (!gst_caps_is_fixed (peer_caps))
    peer_caps = gst_caps_fixate (peer_caps);

  gst_video_info_from_caps (&peer_vi, peer_caps);
  out_format = GST_VIDEO_INFO_FORMAT (&peer_vi);

  /* Update width and height from the caps */
  if (GST_VIDEO_INFO_HEIGHT (&peer_vi) != 1 &&
      GST_VIDEO_INFO_WIDTH (&peer_vi) != 1)
    find_best_size(vpp, &peer_vi, &width, &height);

  if (vpp->format != DEFAULT_FORMAT)
    out_format = vpp->format;

  if (vpp->fps_n) {
    GST_VIDEO_INFO_FPS_N (&vi) = vpp->fps_n;
    GST_VIDEO_INFO_FPS_D (&vi) = vpp->fps_d;
    vpp->field_duration = gst_util_uint64_scale (GST_SECOND,
        vpp->fps_d, vpp->fps_n);
    if (DEFAULT_FRC_ALG == vpp->alg)
      vpp->alg = GST_MFX_FRC_PRESERVE_TIMESTAMP;
  }

  if (peer_caps)
    gst_caps_unref (peer_caps);

  feature =
      gst_mfx_find_preferred_caps_feature (GST_BASE_TRANSFORM_SRC_PAD (trans),
        &out_format);
  gst_video_info_change_format (&vi, out_format, width, height);

  out_caps = gst_video_info_to_caps (&vi);
  if (!out_caps)
    return NULL;

  if (feature) {
    feature_str = gst_mfx_caps_feature_to_string (feature);
    if (feature_str)
      gst_caps_set_features (out_caps, 0,
          gst_caps_features_new (feature_str, NULL));
  }

  if (vpp->format != out_format)
    vpp->format = out_format;

  return out_caps;
}
Example #24
static GstFlowReturn
gst_spectrum_transform_ip (GstBaseTransform * trans, GstBuffer * buffer)
{
  GstSpectrum *spectrum = GST_SPECTRUM (trans);
  guint rate = GST_AUDIO_FILTER_RATE (spectrum);
  guint channels = GST_AUDIO_FILTER_CHANNELS (spectrum);
  guint bps = GST_AUDIO_FILTER_BPS (spectrum);
  guint bpf = GST_AUDIO_FILTER_BPF (spectrum);
  guint output_channels = spectrum->multi_channel ? channels : 1;
  guint c;
  gfloat max_value = (1UL << ((bps << 3) - 1)) - 1;
  guint bands = spectrum->bands;
  guint nfft = 2 * bands - 2;
  guint input_pos;
  gfloat *input;
  GstMapInfo map;
  const guint8 *data;
  gsize size;
  guint fft_todo, msg_todo, block_size;
  gboolean have_full_interval;
  GstSpectrumChannel *cd;
  GstSpectrumInputData input_data;

  g_mutex_lock (&spectrum->lock);
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  data = map.data;
  size = map.size;

  GST_LOG_OBJECT (spectrum, "input size: %" G_GSIZE_FORMAT " bytes", size);

  if (GST_BUFFER_IS_DISCONT (buffer)) {
    GST_DEBUG_OBJECT (spectrum, "Discontinuity detected -- flushing");
    gst_spectrum_flush (spectrum);
  }

  /* If we don't have a FFT context yet (or it was reset due to parameter
   * changes) get one and allocate memory for everything
   */
  if (spectrum->channel_data == NULL) {
    GST_DEBUG_OBJECT (spectrum, "allocating for bands %u", bands);

    gst_spectrum_alloc_channel_data (spectrum);

    /* number of sample frames we process before posting a message
     * interval is in ns */
    spectrum->frames_per_interval =
        gst_util_uint64_scale (spectrum->interval, rate, GST_SECOND);
    spectrum->frames_todo = spectrum->frames_per_interval;
    /* rounding error for frames_per_interval in ns,
     * aggregated it in accumulated_error */
    spectrum->error_per_interval = (spectrum->interval * rate) % GST_SECOND;
    if (spectrum->frames_per_interval == 0)
      spectrum->frames_per_interval = 1;

    GST_INFO_OBJECT (spectrum, "interval %" GST_TIME_FORMAT ", fpi %"
        G_GUINT64_FORMAT ", error %" GST_TIME_FORMAT,
        GST_TIME_ARGS (spectrum->interval), spectrum->frames_per_interval,
        GST_TIME_ARGS (spectrum->error_per_interval));

    spectrum->input_pos = 0;

    gst_spectrum_flush (spectrum);
  }

  if (spectrum->num_frames == 0)
    spectrum->message_ts = GST_BUFFER_TIMESTAMP (buffer);

  input_pos = spectrum->input_pos;
  input_data = spectrum->input_data;

  while (size >= bpf) {
    /* run input_data for a chunk of data */
    fft_todo = nfft - (spectrum->num_frames % nfft);
    msg_todo = spectrum->frames_todo - spectrum->num_frames;
    GST_LOG_OBJECT (spectrum,
        "message frames todo: %u, fft frames todo: %u, input frames %"
        G_GSIZE_FORMAT, msg_todo, fft_todo, (size / bpf));
    block_size = msg_todo;
    if (block_size > (size / bpf))
      block_size = (size / bpf);
    if (block_size > fft_todo)
      block_size = fft_todo;

    for (c = 0; c < output_channels; c++) {
      cd = &spectrum->channel_data[c];
      input = cd->input;
      /* Move the current frames into our ringbuffers */
      input_data (data + c * bps, input, block_size, channels, max_value,
          input_pos, nfft);
    }
    data += block_size * bpf;
    size -= block_size * bpf;
    input_pos = (input_pos + block_size) % nfft;
    spectrum->num_frames += block_size;

    have_full_interval = (spectrum->num_frames == spectrum->frames_todo);

    GST_LOG_OBJECT (spectrum,
        "size: %" G_GSIZE_FORMAT ", do-fft = %d, do-message = %d", size,
        (spectrum->num_frames % nfft == 0), have_full_interval);

    /* If we have enough frames for an FFT or we have all frames required for
     * the interval and we haven't run a FFT, then run an FFT */
    if ((spectrum->num_frames % nfft == 0) ||
        (have_full_interval && !spectrum->num_fft)) {
      for (c = 0; c < output_channels; c++) {
        cd = &spectrum->channel_data[c];
        gst_spectrum_run_fft (spectrum, cd, input_pos);
      }
      spectrum->num_fft++;
    }

    /* Do we have the FFTs for one interval? */
    if (have_full_interval) {
      GST_DEBUG_OBJECT (spectrum, "nfft: %u frames: %" G_GUINT64_FORMAT
          " fpi: %" G_GUINT64_FORMAT " error: %" GST_TIME_FORMAT, nfft,
          spectrum->num_frames, spectrum->frames_per_interval,
          GST_TIME_ARGS (spectrum->accumulated_error));

      spectrum->frames_todo = spectrum->frames_per_interval;
      if (spectrum->accumulated_error >= GST_SECOND) {
        spectrum->accumulated_error -= GST_SECOND;
        spectrum->frames_todo++;
      }
      spectrum->accumulated_error += spectrum->error_per_interval;

      if (spectrum->post_messages) {
        GstMessage *m;

        for (c = 0; c < output_channels; c++) {
          cd = &spectrum->channel_data[c];
          gst_spectrum_prepare_message_data (spectrum, cd);
        }

        m = gst_spectrum_message_new (spectrum, spectrum->message_ts,
            spectrum->interval);

        gst_element_post_message (GST_ELEMENT (spectrum), m);
      }

      if (GST_CLOCK_TIME_IS_VALID (spectrum->message_ts))
        spectrum->message_ts +=
            gst_util_uint64_scale (spectrum->num_frames, GST_SECOND, rate);

      for (c = 0; c < output_channels; c++) {
        cd = &spectrum->channel_data[c];
        gst_spectrum_reset_message_data (spectrum, cd);
      }
      spectrum->num_frames = 0;
      spectrum->num_fft = 0;
    }
  }

  spectrum->input_pos = input_pos;

  gst_buffer_unmap (buffer, &map);
  g_mutex_unlock (&spectrum->lock);

  g_assert (size == 0);

  return GST_FLOW_OK;
}
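/* Illustrative sketch of the interval bookkeeping above (rate and interval
 * are assumptions): frames_per_interval is the integer part of
 * interval * rate, and the remainder is carried in accumulated_error so the
 * average message spacing stays exact (Bresenham-style error
 * accumulation). */
#include <gst/gst.h>

int
main (void)
{
  const guint rate = 44100;
  const GstClockTime interval = 33 * GST_MSECOND;
  guint64 frames_per_interval =
      gst_util_uint64_scale (interval, rate, GST_SECOND);   /* 1455 */
  guint64 error_per_interval = (interval * rate) % GST_SECOND;
  guint64 accumulated_error = 0;
  gint i;

  for (i = 0; i < 5; i++) {
    guint64 frames_todo = frames_per_interval;

    if (accumulated_error >= GST_SECOND) {
      accumulated_error -= GST_SECOND;
      frames_todo++;            /* one extra frame this interval */
    }
    accumulated_error += error_per_interval;
    g_print ("interval %d: %" G_GUINT64_FORMAT " frames\n", i, frames_todo);
  }
  return 0;
}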
static GstFlowReturn
handle_sequence (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime latency;
  const mpeg2_sequence_t *sequence;
  GstVideoCodecState *state;
  GstVideoInfo *vinfo;
  GstVideoFormat format;

  sequence = info->sequence;

  if (sequence->frame_period == 0)
    goto invalid_frame_period;

  /* mpeg2 video can only be from 16x16 to 4096x4096. Everything
   * else is a corrupted file */
  if (sequence->width > 4096 || sequence->width < 16 ||
      sequence->height > 4096 || sequence->height < 16)
    goto invalid_size;

  GST_DEBUG_OBJECT (mpeg2dec,
      "widthxheight: %dx%d , decoded_widthxheight: %dx%d",
      sequence->picture_width, sequence->picture_height, sequence->width,
      sequence->height);

  gst_video_alignment_reset (&mpeg2dec->valign);

  if (sequence->picture_width < sequence->width ||
      sequence->picture_height < sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "we need to crop");
    mpeg2dec->valign.padding_right = sequence->width - sequence->picture_width;
    mpeg2dec->valign.padding_bottom =
        sequence->height - sequence->picture_height;
    mpeg2dec->need_alignment = TRUE;
  } else if (sequence->picture_width == sequence->width &&
      sequence->picture_height == sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "no cropping needed");
    mpeg2dec->need_alignment = FALSE;
  } else {
    goto invalid_picture;
  }

  /* get subsampling */
  if (sequence->chroma_width < sequence->width) {
    /* horizontally subsampled */
    if (sequence->chroma_height < sequence->height) {
      /* and vertically subsampled */
      format = GST_VIDEO_FORMAT_I420;
    } else {
      format = GST_VIDEO_FORMAT_Y42B;
    }
  } else {
    /* not subsampled */
    format = GST_VIDEO_FORMAT_Y444;
  }

  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (mpeg2dec),
      format, sequence->picture_width, sequence->picture_height,
      mpeg2dec->input_state);
  vinfo = &state->info;

  /* If we don't have a valid upstream PAR override it */
  if (GST_VIDEO_INFO_PAR_N (vinfo) == 1 &&
      GST_VIDEO_INFO_PAR_D (vinfo) == 1 &&
      sequence->pixel_width != 0 && sequence->pixel_height != 0) {
#if MPEG2_RELEASE >= MPEG2_VERSION(0,5,0)
    guint pixel_width, pixel_height;
    if (mpeg2_guess_aspect (sequence, &pixel_width, &pixel_height)) {
      vinfo->par_n = pixel_width;
      vinfo->par_d = pixel_height;
    }
#else
    vinfo->par_n = sequence->pixel_width;
    vinfo->par_d = sequence->pixel_height;
#endif
    GST_DEBUG_OBJECT (mpeg2dec, "Setting PAR %d x %d",
        vinfo->par_n, vinfo->par_d);
  }
  vinfo->fps_n = 27000000;
  vinfo->fps_d = sequence->frame_period;

  if (!(sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE))
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  vinfo->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
  vinfo->colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;

  if (sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION) {
    /* do color description */
    switch (sequence->colour_primaries) {
      case 1:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
        break;
      case 4:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
        break;
      case 5:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
        break;
      case 6:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
        break;
      case 7:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
        break;
    }
    /* matrix coefficients */
    switch (sequence->matrix_coefficients) {
      case 1:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
        break;
      case 4:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_FCC;
        break;
      case 5:
      case 6:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
        break;
      case 7:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
        break;
    }
    /* transfer characteristics */
    switch (sequence->transfer_characteristics) {
      case 1:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 4:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA22;
        break;
      case 5:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA28;
        break;
      case 6:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 7:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
        break;
      case 8:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 9-255 reserved */
      default:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
        break;
    }
  }

  GST_DEBUG_OBJECT (mpeg2dec,
      "sequence flags: %d, frame period: %d, frame rate: %d/%d",
      sequence->flags, sequence->frame_period, vinfo->fps_n, vinfo->fps_d);
  GST_DEBUG_OBJECT (mpeg2dec, "profile: %02x, colour_primaries: %d",
      sequence->profile_level_id, sequence->colour_primaries);
  GST_DEBUG_OBJECT (mpeg2dec, "transfer chars: %d, matrix coef: %d",
      sequence->transfer_characteristics, sequence->matrix_coefficients);
  GST_DEBUG_OBJECT (mpeg2dec,
      "FLAGS: CONSTRAINED_PARAMETERS:%d, PROGRESSIVE_SEQUENCE:%d",
      sequence->flags & SEQ_FLAG_CONSTRAINED_PARAMETERS,
      sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE);
  GST_DEBUG_OBJECT (mpeg2dec, "FLAGS: LOW_DELAY:%d, COLOUR_DESCRIPTION:%d",
      sequence->flags & SEQ_FLAG_LOW_DELAY,
      sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION);

  /* Save the padded video information */
  mpeg2dec->decoded_info = *vinfo;
  gst_video_info_align (&mpeg2dec->decoded_info, &mpeg2dec->valign);

  /* Mpeg2dec has 2 frames of latency to produce a picture and 1 frame of
   * latency in its parser */
  latency = gst_util_uint64_scale (3 * GST_SECOND, vinfo->fps_d, vinfo->fps_n);
  gst_video_decoder_set_latency (GST_VIDEO_DECODER (mpeg2dec), latency,
      latency);

  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (mpeg2dec)))
    goto negotiation_fail;

  gst_video_codec_state_unref (state);

  mpeg2_custom_fbuf (mpeg2dec->decoder, 1);

  init_dummybuf (mpeg2dec);

  /* Pump in some null buffers, because otherwise libmpeg2 doesn't
   * initialise the discard_fbuf->id */
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  gst_mpeg2dec_clear_buffers (mpeg2dec);

  return ret;

invalid_frame_period:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Frame period is 0!");
    return GST_FLOW_ERROR;
  }
invalid_size:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Invalid frame dimensions: %d x %d",
        sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }

invalid_picture:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Picture dimensions bigger than frame: "
        "%d x %d is bigger than %d x %d", sequence->picture_width,
        sequence->picture_height, sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }

negotiation_fail:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Failed to negotiate with downstream");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
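/* Illustrative sketch of the latency computation above (the frame_period
 * value is an assumption): the frame rate is stored as
 * 27000000 / frame_period (27 MHz ticks), so three frame durations in
 * nanoseconds are 3 * GST_SECOND * fps_d / fps_n. */
#include <gst/gst.h>

int
main (void)
{
  const gint fps_n = 27000000;
  const gint fps_d = 1080000;   /* frame_period for 25 fps */
  GstClockTime latency;

  latency = gst_util_uint64_scale (3 * GST_SECOND, fps_d, fps_n);
  g_print ("latency: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (latency));
  /* 0:00:00.120000000 == 3 frames at 25 fps */
  return 0;
}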
GstFlowReturn
gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
    GstVideoFrame * frame)
{
  GstFlowReturn ret;
  GstBaseVideoEncoderClass *base_video_encoder_class;

  base_video_encoder_class =
      GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);

  if (frame->is_sync_point) {
    base_video_encoder->distance_from_sync = 0;
    GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  } else {
    GST_BUFFER_FLAG_SET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }

  frame->distance_from_sync = base_video_encoder->distance_from_sync;
  base_video_encoder->distance_from_sync++;

  frame->decode_frame_number = frame->system_frame_number - 1;
  if (frame->decode_frame_number < 0) {
    frame->decode_timestamp = 0;
  } else {
    frame->decode_timestamp = gst_util_uint64_scale (frame->decode_frame_number,
        GST_SECOND * GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_d,
        GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_n);
  }

  GST_BUFFER_TIMESTAMP (frame->src_buffer) = frame->presentation_timestamp;
  GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration;
  GST_BUFFER_OFFSET (frame->src_buffer) = frame->decode_timestamp;

  GST_BASE_VIDEO_CODEC (base_video_encoder)->frames =
      g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame);

  if (!base_video_encoder->set_output_caps) {
    if (base_video_encoder_class->get_caps) {
      GST_BASE_VIDEO_CODEC (base_video_encoder)->caps =
          base_video_encoder_class->get_caps (base_video_encoder);
    } else {
      GST_BASE_VIDEO_CODEC (base_video_encoder)->caps =
          gst_caps_new_simple ("video/unknown", NULL);
    }
    gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        GST_BASE_VIDEO_CODEC (base_video_encoder)->caps);
    base_video_encoder->set_output_caps = TRUE;
  }

  gst_buffer_set_caps (GST_BUFFER (frame->src_buffer),
      GST_BASE_VIDEO_CODEC (base_video_encoder)->caps);

  if (frame->force_keyframe) {
    GstClockTime stream_time;
    GstClockTime running_time;
    GstStructure *s;

    running_time =
        gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);
    stream_time =
        gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);

    /* FIXME this should send the event that we got on the sink pad
       instead of creating a new one */
    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, frame->presentation_timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  if (base_video_encoder_class->shape_output) {
    ret = base_video_encoder_class->shape_output (base_video_encoder, frame);
  } else {
    ret =
        gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        frame->src_buffer);
  }

  gst_base_video_codec_free_frame (frame);

  return ret;
}
Example #27
/* flush the oldest buffer */
static GstFlowReturn
gst_video_rate_flush_prev (GstVideoRate * videorate, gboolean duplicate)
{
  GstFlowReturn res;
  GstBuffer *outbuf;
  GstClockTime push_ts;

  if (!videorate->prevbuf)
    goto eos_before_buffers;

  /* make sure we can write to the metadata */
  outbuf = gst_buffer_make_metadata_writable
      (gst_buffer_ref (videorate->prevbuf));

  GST_BUFFER_OFFSET (outbuf) = videorate->out;
  GST_BUFFER_OFFSET_END (outbuf) = videorate->out + 1;

  if (videorate->discont) {
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
    videorate->discont = FALSE;
  } else
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_DISCONT);

  if (duplicate)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
  else
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);

  /* this is the timestamp we put on the buffer */
  push_ts = videorate->next_ts;

  videorate->out++;
  videorate->out_frame_count++;
  if (videorate->to_rate_numerator) {
    /* interpolate next expected timestamp in the segment */
    videorate->next_ts =
        videorate->segment.accum + videorate->segment.start +
        videorate->base_ts + gst_util_uint64_scale (videorate->out_frame_count,
        videorate->to_rate_denominator * GST_SECOND,
        videorate->to_rate_numerator);
    GST_BUFFER_DURATION (outbuf) = videorate->next_ts - push_ts;
  }

  /* We do not need to update time in VFR (variable frame rate) mode */
  if (!videorate->drop_only) {
    /* adapt for looping, bring back to time in current segment. */
    GST_BUFFER_TIMESTAMP (outbuf) = push_ts - videorate->segment.accum;
  }

  GST_LOG_OBJECT (videorate,
      "old is best, dup, pushing buffer outgoing ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (push_ts));

  res = gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (videorate), outbuf);

  return res;

  /* WARNINGS */
eos_before_buffers:
  {
    GST_INFO_OBJECT (videorate, "got EOS before any buffer was received");
    return GST_FLOW_OK;
  }
}
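/* Illustrative sketch of the timestamp interpolation above (an assumed
 * 25 fps output rate): each output timestamp is derived from the output
 * frame count rather than from the previous timestamp, so rounding errors
 * never drift. */
#include <gst/gst.h>

int
main (void)
{
  const gint to_rate_numerator = 25, to_rate_denominator = 1;
  GstClockTime base_ts = 0;
  guint64 out_frame_count;

  for (out_frame_count = 1; out_frame_count <= 3; out_frame_count++) {
    GstClockTime next_ts = base_ts +
        gst_util_uint64_scale (out_frame_count,
        to_rate_denominator * GST_SECOND, to_rate_numerator);

    g_print ("frame %" G_GUINT64_FORMAT ": next ts %" GST_TIME_FORMAT "\n",
        out_frame_count, GST_TIME_ARGS (next_ts));
  }
  return 0;
}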
Example #28
static gboolean
gst_raw_parse_convert (GstRawParse * rp,
    GstFormat src_format, gint64 src_value,
    GstFormat dest_format, gint64 * dest_value)
{
  gboolean ret = FALSE;

  GST_DEBUG ("converting value %" G_GINT64_FORMAT " from %s (%d) to %s (%d)",
      src_value, gst_format_get_name (src_format), src_format,
      gst_format_get_name (dest_format), dest_format);

  if (src_format == dest_format) {
    *dest_value = src_value;
    ret = TRUE;
    goto done;
  }

  if (src_value == -1) {
    *dest_value = -1;
    ret = TRUE;
    goto done;
  }

  /* bytes to frames */
  if (src_format == GST_FORMAT_BYTES && dest_format == GST_FORMAT_DEFAULT) {
    if (rp->framesize != 0) {
      *dest_value = gst_util_uint64_scale_int (src_value, 1, rp->framesize);
    } else {
      GST_ERROR ("framesize is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* frames to bytes */
  if (src_format == GST_FORMAT_DEFAULT && dest_format == GST_FORMAT_BYTES) {
    *dest_value = gst_util_uint64_scale_int (src_value, rp->framesize, 1);
    ret = TRUE;
    goto done;
  }

  /* time to frames */
  if (src_format == GST_FORMAT_TIME && dest_format == GST_FORMAT_DEFAULT) {
    if (rp->fps_d != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          rp->fps_n, GST_SECOND * rp->fps_d);
    } else {
      GST_ERROR ("framerate denominator is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* frames to time */
  if (src_format == GST_FORMAT_DEFAULT && dest_format == GST_FORMAT_TIME) {
    if (rp->fps_n != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          GST_SECOND * rp->fps_d, rp->fps_n);
    } else {
      GST_ERROR ("framerate numerator is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* time to bytes */
  if (src_format == GST_FORMAT_TIME && dest_format == GST_FORMAT_BYTES) {
    if (rp->fps_d != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          rp->fps_n * rp->framesize, GST_SECOND * rp->fps_d);
    } else {
      GST_ERROR ("framerate denominator is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* bytes to time */
  if (src_format == GST_FORMAT_BYTES && dest_format == GST_FORMAT_TIME) {
    if (rp->fps_n != 0 && rp->framesize != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          GST_SECOND * rp->fps_d, rp->fps_n * rp->framesize);
    } else {
      GST_ERROR ("framerate numerator and/or framesize is 0");
      *dest_value = 0;
    }
    ret = TRUE;
  }

done:

  GST_DEBUG ("ret=%d result %" G_GINT64_FORMAT, ret, *dest_value);

  return ret;
}
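/* Illustrative usage of the same conversions (frame rate and frame size are
 * assumptions): time -> frames -> bytes round-trips through the frame rate
 * and frame size exactly as gst_raw_parse_convert() does above. */
#include <gst/gst.h>

int
main (void)
{
  const gint fps_n = 25, fps_d = 1;
  const gint framesize = 4096;
  gint64 t = 2 * GST_SECOND;
  gint64 frames, bytes;

  frames = gst_util_uint64_scale (t, fps_n, GST_SECOND * fps_d);  /* 50 */
  bytes = gst_util_uint64_scale_int (frames, framesize, 1);       /* 204800 */
  g_print ("%" G_GINT64_FORMAT " frames, %" G_GINT64_FORMAT " bytes\n",
      frames, bytes);
  return 0;
}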
/**
 * gst_base_video_encoder_finish_frame:
 * @base_video_encoder: a #GstBaseVideoEncoder
 * @frame: an encoded #GstVideoFrame 
 *
 * @frame must have a valid encoded data buffer, whose metadata fields
 * are then set appropriately from the frame data, or no buffer at all
 * if the frame should be dropped.
 * The buffer is subsequently pushed downstream or handed to @shape_output.
 * In any case, the frame is considered finished and released.
 *
 * Returns: a #GstFlowReturn resulting from sending data downstream
 */
GstFlowReturn
gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
    GstVideoFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBaseVideoEncoderClass *base_video_encoder_class;
  GList *l;

  base_video_encoder_class =
      GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);

  GST_LOG_OBJECT (base_video_encoder,
      "finish frame fpn %d", frame->presentation_frame_number);

  GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder);

  /* Push all pending events that arrived before this frame */
  for (l = base_video_encoder->base_video_codec.frames; l; l = l->next) {
    GstVideoFrame *tmp = l->data;

    if (tmp->events) {
      GList *k;

      for (k = g_list_last (tmp->events); k; k = k->prev)
        gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
            k->data);
      g_list_free (tmp->events);
      tmp->events = NULL;
    }

    if (tmp == frame)
      break;
  }

  if (frame->force_keyframe) {
    GstClockTime stream_time;
    GstClockTime running_time;
    GstEvent *ev;

    running_time =
        gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);
    stream_time =
        gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);

    /* re-use upstream event if any so it also conveys any additional
     * info upstream arranged in there */
    GST_OBJECT_LOCK (base_video_encoder);
    if (base_video_encoder->force_keyunit_event) {
      ev = base_video_encoder->force_keyunit_event;
      base_video_encoder->force_keyunit_event = NULL;
    } else {
      ev = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
          gst_structure_new ("GstForceKeyUnit", NULL));
    }
    GST_OBJECT_UNLOCK (base_video_encoder);

    gst_structure_set (ev->structure,
        "timestamp", G_TYPE_UINT64, frame->presentation_timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), ev);
  }

  /* no buffer data means this frame is skipped/dropped */
  if (!frame->src_buffer) {
    GST_DEBUG_OBJECT (base_video_encoder, "skipping frame %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->presentation_timestamp));
    goto done;
  }

  if (frame->is_sync_point) {
    GST_LOG_OBJECT (base_video_encoder, "key frame");
    base_video_encoder->distance_from_sync = 0;
    GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  } else {
    GST_BUFFER_FLAG_SET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }

  frame->distance_from_sync = base_video_encoder->distance_from_sync;
  base_video_encoder->distance_from_sync++;

  frame->decode_frame_number = frame->system_frame_number - 1;
  if (frame->decode_frame_number < 0) {
    frame->decode_timestamp = 0;
  } else {
    frame->decode_timestamp = gst_util_uint64_scale (frame->decode_frame_number,
        GST_SECOND * GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_d,
        GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_n);
  }

  GST_BUFFER_TIMESTAMP (frame->src_buffer) = frame->presentation_timestamp;
  GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration;
  GST_BUFFER_OFFSET (frame->src_buffer) = frame->decode_timestamp;

  /* update rate estimate */
  GST_BASE_VIDEO_CODEC (base_video_encoder)->bytes +=
      GST_BUFFER_SIZE (frame->src_buffer);
  if (GST_CLOCK_TIME_IS_VALID (frame->presentation_duration)) {
    GST_BASE_VIDEO_CODEC (base_video_encoder)->time +=
        frame->presentation_duration;
  } else {
    /* better none than nothing valid */
    GST_BASE_VIDEO_CODEC (base_video_encoder)->time = GST_CLOCK_TIME_NONE;
  }

  if (G_UNLIKELY (GST_BASE_VIDEO_CODEC (base_video_encoder)->discont)) {
    GST_LOG_OBJECT (base_video_encoder, "marking discont");
    GST_BUFFER_FLAG_SET (frame->src_buffer, GST_BUFFER_FLAG_DISCONT);
    GST_BASE_VIDEO_CODEC (base_video_encoder)->discont = FALSE;
  }

  gst_buffer_set_caps (GST_BUFFER (frame->src_buffer),
      GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder)));

  if (base_video_encoder_class->shape_output) {
    ret = base_video_encoder_class->shape_output (base_video_encoder, frame);
  } else {
    ret =
        gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        frame->src_buffer);
  }
  frame->src_buffer = NULL;

done:
  /* handed out */
  GST_BASE_VIDEO_CODEC (base_video_encoder)->frames =
      g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame);

  gst_base_video_codec_free_frame (frame);

  GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);

  return ret;
}
void
gst_decklink_video_src_convert_to_external_clock (GstDecklinkVideoSrc * self,
    GstClockTime * timestamp, GstClockTime * duration)
{
  GstClock *clock;

  g_assert (timestamp != NULL);

  if (*timestamp == GST_CLOCK_TIME_NONE)
    return;

  clock = gst_element_get_clock (GST_ELEMENT_CAST (self));
  if (clock && clock != self->input->clock) {
    GstClockTime internal, external, rate_n, rate_d;
    GstClockTimeDiff external_start_time_diff;

    gst_clock_get_calibration (self->input->clock, &internal, &external,
        &rate_n, &rate_d);

    if (rate_n != rate_d && self->internal_base_time != GST_CLOCK_TIME_NONE) {
      GstClockTime internal_timestamp = *timestamp;

      // Convert to the running time corresponding to both clock times
      internal -= self->internal_base_time;
      external -= self->external_base_time;

      // Get the difference in the internal time, note
      // that the capture time is internal time.
      // Then scale this difference and offset it to
      // our external time. Now we have the running time
      // according to our external clock.
      //
      // For the duration we just scale
      if (internal > internal_timestamp) {
        guint64 diff = internal - internal_timestamp;
        diff = gst_util_uint64_scale (diff, rate_n, rate_d);
        *timestamp = external - diff;
      } else {
        guint64 diff = internal_timestamp - internal;
        diff = gst_util_uint64_scale (diff, rate_n, rate_d);
        *timestamp = external + diff;
      }

      GST_LOG_OBJECT (self,
          "Converted %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " (external: %"
          GST_TIME_FORMAT " internal %" GST_TIME_FORMAT " rate: %lf)",
          GST_TIME_ARGS (internal_timestamp), GST_TIME_ARGS (*timestamp),
          GST_TIME_ARGS (external), GST_TIME_ARGS (internal),
          ((gdouble) rate_n) / ((gdouble) rate_d));

      if (duration) {
        GstClockTime internal_duration = *duration;

        *duration = gst_util_uint64_scale (internal_duration, rate_d, rate_n);

        GST_LOG_OBJECT (self,
            "Converted duration %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
            " (external: %" GST_TIME_FORMAT " internal %" GST_TIME_FORMAT
            " rate: %lf)", GST_TIME_ARGS (internal_duration),
            GST_TIME_ARGS (*duration), GST_TIME_ARGS (external),
            GST_TIME_ARGS (internal), ((gdouble) rate_n) / ((gdouble) rate_d));
      }
    } else {
      GST_LOG_OBJECT (self, "No clock conversion needed, relative rate is 1.0");
    }

    // Add the diff between the external time when we
    // went to playing and the external time when the
    // pipeline went to playing. Otherwise we will
    // always start outputting from 0 instead of the
    // current running time.
    external_start_time_diff =
        gst_element_get_base_time (GST_ELEMENT_CAST (self));
    external_start_time_diff =
        self->external_base_time - external_start_time_diff;
    *timestamp += external_start_time_diff;
  } else {
    GST_LOG_OBJECT (self, "No clock conversion needed, same clocks");
  }
}