Example #1
0
void
MediaSourceReader::OnNotDecoded(MediaData::Type aType, RequestSampleCallback::NotDecodedReason aReason)
{
  MSE_DEBUG("MediaSourceReader(%p)::OnNotDecoded aType=%u aReason=%u IsEnded: %d", this, aType, aReason, IsEnded());
  if (aReason == RequestSampleCallback::DECODE_ERROR) {
    GetCallback()->OnNotDecoded(aType, aReason);
    return;
  }
  // End of stream. Force switching past this stream to another reader by
  // switching to the end of the buffered range.
  MOZ_ASSERT(aReason == RequestSampleCallback::END_OF_STREAM);
  nsRefPtr<MediaDecoderReader> reader = aType == MediaData::AUDIO_DATA ?
                                          mAudioReader : mVideoReader;

  // Find the closest approximation to the end time for this stream.
  // mLast{Audio,Video}Time differs from the actual end time because of
  // Bug 1065207 - the duration of a WebM fragment is an estimate, not the
  // actual duration. For audio, one case where they differ is when the
  // final sample's actual duration is small but the previous sample's is
  // large: the buffered end time uses that previous sample's duration as
  // its estimate, giving an end time greater than mLastAudioTime, which is
  // the actual sample end time.
  // Reader switching is based on the buffered end time, though, so the two
  // can be quite different. By using EOS_FUZZ_US together with the buffered
  // end time we attempt to account for this difference.
  int64_t* time = aType == MediaData::AUDIO_DATA ? &mLastAudioTime : &mLastVideoTime;
  if (reader) {
    nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
    reader->GetBuffered(ranges);
    if (ranges->Length() > 0) {
      // End time is a double, so round to the nearest microsecond by adding
      // 0.5 before the implicit truncation to int64_t.
      int64_t end = ranges->GetEndTime() * USECS_PER_S + 0.5;
      *time = std::max(*time, end);
    }
  }

  // See if we can find a different reader that can pick up where we left off.
  // We use EOS_FUZZ_US to allow for the fact that our end time can be
  // inaccurate due to bug 1065207 - the duration of a WebM fragment is an
  // estimate.
  if (aType == MediaData::AUDIO_DATA && SwitchAudioReader(*time + EOS_FUZZ_US)) {
    RequestAudioData();
    return;
  }
  if (aType == MediaData::VIDEO_DATA && SwitchVideoReader(*time + EOS_FUZZ_US)) {
    RequestVideoData(false, 0);
    return;
  }

  // If the entire MediaSource is done, generate an EndOfStream.
  if (IsEnded()) {
    GetCallback()->OnNotDecoded(aType, RequestSampleCallback::END_OF_STREAM);
    return;
  }

  // We don't have the data the caller wants. Tell the caller that we're
  // waiting for JS to give us more data.
  GetCallback()->OnNotDecoded(aType, RequestSampleCallback::WAITING_FOR_DATA);
}
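
A minimal standalone sketch of the time arithmetic used above: the buffered end time arrives as a double in seconds, is rounded to the nearest microsecond, combined with the last decoded time via std::max, and then widened by a fuzz value before being handed to the reader switch. The EOS_FUZZ_US value below is an illustrative assumption, not the constant from the tree.

// Sketch only (not Gecko code): seconds-to-microseconds rounding and the
// fuzz-widened switch target used by MediaSourceReader::OnNotDecoded().
#include <algorithm>
#include <cstdint>
#include <cstdio>

static const int64_t USECS_PER_S = 1000000;  // as in the real code
static const int64_t EOS_FUZZ_US = 500000;   // illustrative value, an assumption

int64_t SecondsToUsec(double aSeconds)
{
  // End time is a double; adding 0.5 before truncation rounds to nearest.
  return static_cast<int64_t>(aSeconds * USECS_PER_S + 0.5);
}

int main()
{
  int64_t lastAudioTime = 1200000;               // 1.2 s actually decoded
  int64_t bufferedEnd = SecondsToUsec(1.2503);   // 1250300 us (estimated end)

  // Use whichever end-time approximation is larger, as OnNotDecoded() does.
  int64_t end = std::max(lastAudioTime, bufferedEnd);

  // The target passed to SwitchAudioReader() is the end time plus the fuzz,
  // so a reader whose data begins near our (possibly inaccurate) estimated
  // end can still be selected; the actual selection lives in SwitchAudioReader().
  int64_t switchTarget = end + EOS_FUZZ_US;
  std::printf("end=%lld target=%lld\n",
              static_cast<long long>(end),
              static_cast<long long>(switchTarget));
  return 0;
}
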
void
AccurateSeekTask::OnNotDecoded(MediaData::Type aType,
                               MediaDecoderReader::NotDecodedReason aReason)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished");

  SAMPLE_LOG("OnNotDecoded type=%d reason=%u", aType, aReason);

  // Ignore pending audio requests during a video-only seek.
  if (aType == MediaData::AUDIO_DATA && mTarget.IsVideoOnly()) {
    return;
  }

  if (aReason == MediaDecoderReader::DECODE_ERROR) {
    // If this is a decode error, delegate to the generic error path.
    CancelCallbacks();
    RejectIfExist(__func__);
    return;
  }

  // If the decoder is waiting for data, we tell it to call us back when the
  // data arrives.
  if (aReason == MediaDecoderReader::WAITING_FOR_DATA) {
    mReader->WaitForData(aType);
    return;
  }

  if (aReason == MediaDecoderReader::CANCELED) {
    if (aType == MediaData::AUDIO_DATA) {
      RequestAudioData();
    } else {
      RequestVideoData();
    }
    return;
  }

  if (aReason == MediaDecoderReader::END_OF_STREAM) {
    if (aType == MediaData::AUDIO_DATA) {
      mIsAudioQueueFinished = true;
      mDoneAudioSeeking = true;
    } else {
      mIsVideoQueueFinished = true;
      mDoneVideoSeeking = true;
      if (mFirstVideoFrameAfterSeek) {
        // Hit the end of stream. Move mFirstVideoFrameAfterSeek into
        // mSeekedVideoData so we have something to display after seeking.
        mSeekedVideoData = mFirstVideoFrameAfterSeek.forget();
      }
    }
    MaybeFinishSeek();
  }
}
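
Condensed into a decision table, the reason handling above maps each NotDecodedReason to one of four actions. A sketch with stand-in enums rather than the real Gecko types:

// Sketch only: mirrors the branch structure of AccurateSeekTask::OnNotDecoded().
#include <cstdio>

enum class NotDecodedReason { DECODE_ERROR, WAITING_FOR_DATA, CANCELED, END_OF_STREAM };
enum class SeekAction { RejectSeek, WaitForData, RetryRequest, MarkStreamDone };

SeekAction ActionFor(NotDecodedReason aReason)
{
  switch (aReason) {
    case NotDecodedReason::DECODE_ERROR:     return SeekAction::RejectSeek;     // fail the seek
    case NotDecodedReason::WAITING_FOR_DATA: return SeekAction::WaitForData;    // ask the reader to call back
    case NotDecodedReason::CANCELED:         return SeekAction::RetryRequest;   // re-issue the sample request
    case NotDecodedReason::END_OF_STREAM:    return SeekAction::MarkStreamDone; // finish this stream's part of the seek
  }
  return SeekAction::RejectSeek;  // unreachable; keeps compilers quiet
}

int main()
{
  std::printf("%d\n", static_cast<int>(ActionFor(NotDecodedReason::CANCELED)));  // 2
  return 0;
}
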
void
AccurateSeekTask::OnSeekResolved(media::TimeUnit)
{
  AssertOwnerThread();

  mSeekRequest.Complete();
  // We must decode the first samples of the active streams to determine the
  // new stream time, so dispatch tasks to do that.
  if (!mDoneVideoSeeking) {
    RequestVideoData();
  }
  if (!mDoneAudioSeeking) {
    RequestAudioData();
  }
}
Example #4
0
void
MediaSourceReader::OnAudioEOS()
{
  MSE_DEBUG("MediaSourceReader(%p)::OnAudioEOS reader=%p (decoders=%u)",
            this, mAudioReader.get(), mAudioTrack->Decoders().Length());
  if (SwitchAudioReader(mLastAudioTime)) {
    // Success! Resume decoding with the next audio decoder.
    RequestAudioData();
  } else if (IsEnded()) {
    // End of stream.
    MSE_DEBUG("MediaSourceReader(%p)::OnAudioEOS reader=%p EOS (decoders=%u)",
              this, mAudioReader.get(), mAudioTrack->Decoders().Length());
    GetCallback()->OnAudioEOS();
  }
}
void
AccurateSeekTask::OnAudioDecoded(MediaData* aAudioSample)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished");

  RefPtr<MediaData> audio(aAudioSample);
  MOZ_ASSERT(audio);

  // The MDSM::mDecodedAudioEndTime will be updated once the whole SeekTask is
  // resolved.

  SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
    audio->mTime, audio->GetEndTime(), audio->mDiscontinuity);

  // A video-only seek doesn't reset the audio decoder, so there might be
  // pending audio requests when AccurateSeekTask::Seek() begins. Just store
  // the data without checking |mDiscontinuity| or calling
  // DropAudioUpToSeekTarget().
  if (mTarget.IsVideoOnly()) {
    mSeekedAudioData = audio.forget();
    return;
  }

  if (mFirstAudioSample) {
    mFirstAudioSample = false;
    MOZ_ASSERT(audio->mDiscontinuity);
  }

  AdjustFastSeekIfNeeded(audio);

  if (mTarget.IsFast()) {
    // Non-precise seek; we can stop the seek at the first sample.
    mSeekedAudioData = audio;
    mDoneAudioSeeking = true;
  } else if (NS_FAILED(DropAudioUpToSeekTarget(audio))) {
    CancelCallbacks();
    RejectIfExist(__func__);
    return;
  }

  if (!mDoneAudioSeeking) {
    RequestAudioData();
    return;
  }
  MaybeFinishSeek();
}
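
A rough standalone sketch of the fast-versus-accurate distinction above: a fast seek keeps the first decoded sample, while an accurate seek discards samples that end before the target and keeps requesting more until one crosses it. The discard rule is inferred from the name DropAudioUpToSeekTarget(), not copied from Gecko, and Sample is a stand-in type.

// Sketch only: fast vs. accurate audio seek policy.
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

struct Sample {
  int64_t mTime;     // start time in microseconds
  int64_t mEndTime;  // end time in microseconds
};

// Returns the sample playback should resume with, or nothing if every decoded
// sample ends before the target (i.e. more audio data must be requested).
std::optional<Sample> SeekAudio(const std::vector<Sample>& aDecoded,
                                int64_t aTargetUs, bool aFastSeek)
{
  if (aFastSeek && !aDecoded.empty()) {
    // Non-precise seek: stop at the first decoded sample.
    return aDecoded.front();
  }
  for (const Sample& s : aDecoded) {
    if (s.mEndTime > aTargetUs) {
      // Accurate seek: samples wholly before the target are dropped; the
      // first sample crossing the target is where playback resumes.
      return s;
    }
  }
  return std::nullopt;
}

int main()
{
  std::vector<Sample> decoded = {{0, 20000}, {20000, 40000}, {40000, 60000}};
  auto hit = SeekAudio(decoded, 45000, /*aFastSeek=*/false);
  std::printf("resume at %lld us\n", static_cast<long long>(hit->mTime));  // 40000
  return 0;
}
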
Example #6
0
nsresult
SeekTask::EnsureAudioDecodeTaskQueued()
{
  AssertOwnerThread();

  SAMPLE_LOG("EnsureAudioDecodeTaskQueued isDecoding=%d status=%s",
              IsAudioDecoding(), AudioRequestStatus());

  if (!IsAudioDecoding() ||
      mReader->IsRequestingAudioData() ||
      mReader->IsWaitingAudioData() ||
      mSeekRequest.Exists()) {
    return NS_OK;
  }

  RequestAudioData();
  return NS_OK;
}
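
The early return above is a guard against issuing redundant decode requests; reduced to a predicate over plain booleans standing in for the reader and seek state (a sketch, not Gecko code):

// Sketch only: the guard from SeekTask::EnsureAudioDecodeTaskQueued().
#include <cstdio>

bool ShouldRequestAudio(bool aIsAudioDecoding,
                        bool aRequestPending,
                        bool aWaitingForData,
                        bool aSeekRequestExists)
{
  // Only ask the reader for more audio when decoding is still active and no
  // other request, wait, or seek is already outstanding.
  return aIsAudioDecoding && !aRequestPending && !aWaitingForData && !aSeekRequestExists;
}

int main()
{
  std::printf("%d\n", ShouldRequestAudio(true, false, false, false));  // 1: request more
  std::printf("%d\n", ShouldRequestAudio(true, true, false, false));   // 0: already pending
  return 0;
}
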
void
AccurateSeekTask::SetCallbacks()
{
  AssertOwnerThread();

  mAudioCallback = mReader->AudioCallback().Connect(
    OwnerThread(), [this] (AudioCallbackData aData) {
    if (aData.is<MediaData*>()) {
      OnAudioDecoded(aData.as<MediaData*>());
    } else {
      OnNotDecoded(MediaData::AUDIO_DATA,
        aData.as<MediaDecoderReader::NotDecodedReason>());
    }
  });

  mVideoCallback = mReader->VideoCallback().Connect(
    OwnerThread(), [this] (VideoCallbackData aData) {
    typedef Tuple<MediaData*, TimeStamp> Type;
    if (aData.is<Type>()) {
      OnVideoDecoded(Get<0>(aData.as<Type>()));
    } else {
      OnNotDecoded(MediaData::VIDEO_DATA,
        aData.as<MediaDecoderReader::NotDecodedReason>());
    }
  });

  mAudioWaitCallback = mReader->AudioWaitCallback().Connect(
    OwnerThread(), [this] (WaitCallbackData aData) {
    // Ignore pending audio requests during a video-only seek.
    if (mTarget.IsVideoOnly()) {
      return;
    }
    if (aData.is<MediaData::Type>()) {
      RequestAudioData();
    }
  });

  mVideoWaitCallback = mReader->VideoWaitCallback().Connect(
    OwnerThread(), [this] (WaitCallbackData aData) {
    if (aData.is<MediaData::Type>()) {
      RequestVideoData();
    }
  });
}
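
All four callbacks follow the same shape: the payload is a variant that either carries decoded data or a NotDecodedReason, and the lambda dispatches on which alternative is present. A standalone sketch of that dispatch pattern, using std::variant in place of Gecko's Variant and Tuple types:

// Sketch only: dispatch-on-variant pattern from AccurateSeekTask::SetCallbacks().
#include <cstdio>
#include <variant>

enum class NotDecodedReason { WAITING_FOR_DATA, CANCELED, END_OF_STREAM, DECODE_ERROR };
struct DecodedSample { long long mTimeUs; };

using CallbackData = std::variant<DecodedSample, NotDecodedReason>;

void OnAudioCallback(const CallbackData& aData)
{
  if (std::holds_alternative<DecodedSample>(aData)) {
    // Success path: hand the sample to the seek logic (OnAudioDecoded above).
    std::printf("decoded sample at %lld us\n", std::get<DecodedSample>(aData).mTimeUs);
  } else {
    // Failure path: route the reason through the not-decoded handler.
    std::printf("not decoded, reason=%d\n",
                static_cast<int>(std::get<NotDecodedReason>(aData)));
  }
}

int main()
{
  OnAudioCallback(DecodedSample{1200000});
  OnAudioCallback(NotDecodedReason::END_OF_STREAM);
  return 0;
}
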