Example #1
void
MediaSourceReader::AttemptSeek()
{
  // Make sure we don't hold the monitor while calling into the reader
  // Seek methods since it can deadlock.
  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    if (!mWaitingForSeekData) {
      return;
    }

    mSeekToEnd = IsEnded() && mPendingSeekTime >= mMediaSourceDuration * USECS_PER_S;
    if (!mSeekToEnd && !TrackBuffersContainTime(mPendingSeekTime)) {
      mVideoSourceDecoder = nullptr;
      mAudioSourceDecoder = nullptr;
      return;
    }
    mWaitingForSeekData = false;
  }

  // Decoding discontinuity upon seek; reset last times to the seek target.
  mLastAudioTime = mPendingSeekTime;
  mLastVideoTime = mPendingSeekTime;

  if (mVideoTrack) {
    DoVideoSeek();
  } else if (mAudioTrack) {
    DoAudioSeek();
  } else {
    MOZ_CRASH();
  }
}
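The scoped block at the top of AttemptSeek() is the point of the deadlock
comment: the ReentrantMonitorAutoEnter is destroyed at the closing brace, so
the monitor is already released before DoVideoSeek()/DoAudioSeek() run. Below
is a minimal, hypothetical sketch of the same pattern using a plain std::mutex
and made-up names in place of the decoder's reentrant monitor.

#include <mutex>

std::mutex gStateMutex;          // stands in for the decoder's reentrant monitor
bool gWaitingForSeekData = true; // hypothetical shared state

void DoSeek() { /* may try to take gStateMutex again internally */ }

void AttemptSeekSketch()
{
  {
    // Inspect and update the shared state under the lock...
    std::lock_guard<std::mutex> lock(gStateMutex);
    if (!gWaitingForSeekData) {
      return;
    }
    gWaitingForSeekData = false;
  } // ...but let the guard go out of scope here, so DoSeek() is never
    // called with the mutex held.
  DoSeek();
}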
Example #2
void
MediaSourceReader::MaybeNotifyHaveData()
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  bool haveAudio = false, haveVideo = false;
  bool ended = IsEnded();
  // If we are in ended mode, we will resolve any pending wait promises.
  // The next Request*Data will handle END_OF_STREAM or going back into waiting
  // mode.
  if (!IsSeeking() && mAudioTrack) {
    if (!mLastAudioTime) {
      nsRefPtr<SourceBufferDecoder> d = FirstDecoder(MediaData::AUDIO_DATA);
      haveAudio = !!d;
    } else {
      haveAudio = HaveData(mLastAudioTime, MediaData::AUDIO_DATA);
    }
    if (ended || haveAudio) {
      WaitPromise(MediaData::AUDIO_DATA).ResolveIfExists(MediaData::AUDIO_DATA, __func__);
    }
  }
  if (!IsSeeking() && mVideoTrack) {
    if (!mLastVideoTime) {
      nsRefPtr<SourceBufferDecoder> d = FirstDecoder(MediaData::VIDEO_DATA);
      haveVideo = !!d;
    } else {
      haveVideo = HaveData(mLastVideoTime, MediaData::VIDEO_DATA);
    }
    if (ended || haveVideo) {
      WaitPromise(MediaData::VIDEO_DATA).ResolveIfExists(MediaData::VIDEO_DATA, __func__);
    }
  }
  MSE_DEBUG("isSeeking=%d haveAudio=%d, haveVideo=%d ended=%d",
            IsSeeking(), haveAudio, haveVideo, ended);
}
Example #3
media::TimeIntervals
MediaSourceReader::GetBuffered()
{
  MOZ_ASSERT(OnTaskQueue());
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  media::TimeIntervals buffered;

  media::TimeUnit highestEndTime;
  nsTArray<media::TimeIntervals> activeRanges;
  // Must set the capacity of the nsTArray first: bug #1164444
  activeRanges.SetCapacity(mTrackBuffers.Length());

  for (const auto& trackBuffer : mTrackBuffers) {
    activeRanges.AppendElement(trackBuffer->Buffered());
    highestEndTime = std::max(highestEndTime, activeRanges.LastElement().GetEnd());
  }

  buffered +=
    media::TimeInterval(media::TimeUnit::FromMicroseconds(0), highestEndTime);

  for (auto& range : activeRanges) {
    if (IsEnded() && range.Length()) {
      // Set the end time on the last range to highestEndTime by adding a
      // new range spanning the current end time to highestEndTime, which
      // Normalize() will then merge with the old last range.
      range +=
        media::TimeInterval(range.GetEnd(), highestEndTime);
    }
    buffered.Intersection(range);
  }

  MSE_DEBUG("ranges=%s", DumpTimeRanges(buffered).get());
  return buffered;
}
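The overall shape of GetBuffered() above is: start from a single interval
[0, highestEndTime], extend each track's last range to highestEndTime once the
stream has ended, and intersect the result with every track buffer's ranges.
The following is a stripped-down, hypothetical sketch of that intersection step
over plain (start, end) pairs; it is not the media::TimeIntervals API, just an
illustration of what Intersection() computes.

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Sorted, non-overlapping (start, end) ranges in seconds.
using Ranges = std::vector<std::pair<double, double>>;

// Simplified stand-in for media::TimeIntervals::Intersection().
static Ranges Intersect(const Ranges& a, const Ranges& b)
{
  Ranges out;
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    double start = std::max(a[i].first, b[j].first);
    double end = std::min(a[i].second, b[j].second);
    if (start < end) {
      out.push_back({start, end});
    }
    // Advance whichever range finishes first.
    (a[i].second < b[j].second) ? ++i : ++j;
  }
  return out;
}

int main()
{
  Ranges buffered = {{0.0, 12.0}};           // [0, highestEndTime]
  Ranges audio = {{0.0, 5.0}, {6.0, 12.0}};  // per-track buffered ranges
  Ranges video = {{0.0, 11.5}};
  buffered = Intersect(buffered, audio);
  buffered = Intersect(buffered, video);
  for (const auto& r : buffered) {
    std::printf("[%.1f, %.1f] ", r.first, r.second);  // [0.0, 5.0] [6.0, 11.5]
  }
  std::printf("\n");
}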
Example #4
void
MediaSourceReader::OnNotDecoded(MediaData::Type aType, RequestSampleCallback::NotDecodedReason aReason)
{
  MSE_DEBUG("MediaSourceReader(%p)::OnNotDecoded aType=%u aReason=%u IsEnded: %d", this, aType, aReason, IsEnded());
  if (aReason == RequestSampleCallback::DECODE_ERROR) {
    GetCallback()->OnNotDecoded(aType, aReason);
    return;
  }
  // End of stream. Force switching past this stream to another reader by
  // switching to the end of the buffered range.
  MOZ_ASSERT(aReason == RequestSampleCallback::END_OF_STREAM);
  nsRefPtr<MediaDecoderReader> reader = aType == MediaData::AUDIO_DATA ?
                                          mAudioReader : mVideoReader;

  // Find the closest approximation to the end time for this stream.
  // mLast{Audio,Video}Time differs from the actual end time because of
  // bug 1065207 - the duration of a WebM fragment is an estimate, not the
  // actual duration. For audio, an example of where they differ is when the
  // actual sample duration is small but the previous sample's duration is
  // large: the buffered end time uses that last sample duration as an
  // estimate of the end time, giving an end time that is greater than
  // mLastAudioTime, which is the actual sample end time.
  // Reader switching, however, is based on the buffered end time, so the two
  // can be quite different. By using EOS_FUZZ_US together with the buffered
  // end time we attempt to account for this difference.
  int64_t* time = aType == MediaData::AUDIO_DATA ? &mLastAudioTime : &mLastVideoTime;
  if (reader) {
    nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
    reader->GetBuffered(ranges);
    if (ranges->Length() > 0) {
      // End time is a double in seconds; convert to the nearest microsecond
      // by adding 0.5 before the truncation.
      int64_t end = ranges->GetEndTime() * USECS_PER_S + 0.5;
      *time = std::max(*time, end);
    }
  }

  // See if we can find a different reader that can pick up where we left off.
  // We use EOS_FUZZ_US to allow for the fact that our end time can be
  // inaccurate due to bug 1065207 - the duration of a WebM frame is an estimate.
  if (aType == MediaData::AUDIO_DATA && SwitchAudioReader(*time + EOS_FUZZ_US)) {
    RequestAudioData();
    return;
  }
  if (aType == MediaData::VIDEO_DATA && SwitchVideoReader(*time + EOS_FUZZ_US)) {
    RequestVideoData(false, 0);
    return;
  }

  // If the entire MediaSource is done, generate an EndOfStream.
  if (IsEnded()) {
    GetCallback()->OnNotDecoded(aType, RequestSampleCallback::END_OF_STREAM);
    return;
  }

  // We don't have the data the caller wants. Tell the caller that we're
  // waiting for JS to give us more data.
  GetCallback()->OnNotDecoded(aType, RequestSampleCallback::WAITING_FOR_DATA);
}
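Two details of the switch above are easy to miss: the buffered end time comes
back as a double in seconds and is rounded to the nearest microsecond by adding
0.5 before truncation, and the probe time passed to Switch{Audio,Video}Reader()
is padded by EOS_FUZZ_US to absorb the estimation error described in the
comment. A standalone sketch of that arithmetic follows; the constant values
are assumptions for illustration only.

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const int64_t USECS_PER_S = 1000000;  // microseconds per second
static const int64_t EOS_FUZZ_US = 500000;   // assumed fuzz value for the sketch

int main()
{
  double bufferedEndSeconds = 12.345678;  // e.g. ranges->GetEndTime()
  int64_t lastAudioTimeUs = 12300000;     // e.g. mLastAudioTime

  // Adding 0.5 before the integer truncation rounds a positive double to the
  // nearest microsecond instead of flooring it.
  int64_t bufferedEndUs =
    static_cast<int64_t>(bufferedEndSeconds * USECS_PER_S + 0.5);

  // Take the larger of the two end-time estimates, then pad by the fuzz when
  // probing for a reader that can continue from here.
  int64_t resumeTimeUs = std::max(lastAudioTimeUs, bufferedEndUs);
  int64_t switchTargetUs = resumeTimeUs + EOS_FUZZ_US;

  std::printf("end=%lld us, switch target=%lld us\n",
              (long long)bufferedEndUs, (long long)switchTargetUs);
}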
Example #5
void
MediaSourceReader::OnAudioEOS()
{
  MSE_DEBUG("MediaSourceReader(%p)::OnAudioEOS reader=%p (decoders=%u)",
            this, mAudioReader.get(), mAudioTrack->Decoders().Length());
  if (SwitchAudioReader(mLastAudioTime)) {
    // Success! Resume decoding with next audio decoder.
    RequestAudioData();
  } else if (IsEnded()) {
    // End of stream.
    MSE_DEBUG("MediaSourceReader(%p)::OnAudioEOS reader=%p EOS (decoders=%u)",
              this, mAudioReader.get(), mAudioTrack->Decoders().Length());
    GetCallback()->OnAudioEOS();
  }
}
Example #6
void
MediaSourceReader::OnVideoNotDecoded(NotDecodedReason aReason)
{
  MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
  mVideoRequest.Complete();

  MSE_DEBUG("aReason=%u IsEnded: %d", aReason, IsEnded());

  if (aReason == CANCELED) {
    mVideoPromise.Reject(CANCELED, __func__);
    return;
  }

  // End of stream: force switching past this stream to another reader by
  // switching to the end of the buffered range.
  int64_t lastVideoTime = mLastVideoTime;
  if (aReason == END_OF_STREAM && mVideoSourceDecoder) {
    AdjustEndTime(&mLastVideoTime, mVideoSourceDecoder);
  }

  // See if we can find a different reader that can pick up where we left off.
  SwitchSourceResult result = SwitchVideoSource(&mLastVideoTime);
  if (result == SOURCE_NEW) {
    GetVideoReader()->ResetDecode();
    mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                           ->RefableThen(GetTaskQueue(), __func__, this,
                                         &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                         &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
    return;
  }

  // If we got a DECODE_ERROR and we have buffered data in the requested range,
  // then it must be a genuine decoding error.
  // Otherwise we can assume that the data was either evicted or explicitly
  // removed from the source buffer, and we should wait for new data.
  if (aReason == DECODE_ERROR && result != SOURCE_NONE) {
    mVideoPromise.Reject(DECODE_ERROR, __func__);
    return;
  }

  CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);

  if (mLastVideoTime - lastVideoTime >= EOS_FUZZ_US) {
    // No decoders are available to switch to. We will re-attempt from the last
    // failing position.
    mLastVideoTime = lastVideoTime;
  }
}
Example #7
void
MediaSourceReader::OnVideoEOS()
{
  // End of stream. See if we can switch to another video decoder.
  MSE_DEBUG("MediaSourceReader(%p)::OnVideoEOS reader=%p (decoders=%u)",
            this, mVideoReader.get(), mVideoTrack->Decoders().Length());
  if (SwitchVideoReader(mLastVideoTime)) {
    // Success! Resume decoding with next video decoder.
    RequestVideoData(false, 0);
  } else if (IsEnded()) {
    // End of stream.
    MSE_DEBUG("MediaSourceReader(%p)::OnVideoEOS reader=%p EOS (decoders=%u)",
              this, mVideoReader.get(), mVideoTrack->Decoders().Length());
    GetCallback()->OnVideoEOS();
  }
}
Example #8
nsresult
MediaSourceReader::GetBuffered(dom::TimeRanges* aBuffered)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MOZ_ASSERT(aBuffered->Length() == 0);
  if (mTrackBuffers.IsEmpty()) {
    return NS_OK;
  }

  double highestEndTime = 0;

  nsTArray<nsRefPtr<TimeRanges>> activeRanges;
  for (uint32_t i = 0; i < mTrackBuffers.Length(); ++i) {
    nsRefPtr<TimeRanges> r = new TimeRanges();
    mTrackBuffers[i]->Buffered(r);
    activeRanges.AppendElement(r);
    highestEndTime = std::max(highestEndTime, activeRanges.LastElement()->GetEndTime());
  }

  TimeRanges* intersectionRanges = aBuffered;
  intersectionRanges->Add(0, highestEndTime);

  for (uint32_t i = 0; i < activeRanges.Length(); ++i) {
    TimeRanges* sourceRanges = activeRanges[i];

    if (IsEnded()) {
      // Set the end time on the last range to highestEndTime by adding a
      // new range spanning the current end time to highestEndTime, which
      // Normalize() will then merge with the old last range.
      sourceRanges->Add(sourceRanges->GetEndTime(), highestEndTime);
      sourceRanges->Normalize();
    }

    intersectionRanges->Intersection(sourceRanges);
  }

  MSE_DEBUG("ranges=%s", DumpTimeRanges(intersectionRanges).get());
  return NS_OK;
}
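The IsEnded() branch above relies on TimeRanges::Normalize() merging the newly
added (currentEnd, highestEndTime) range into the existing last range. Here is
a tiny, self-contained sketch of that merge behaviour using a hand-rolled range
list; SimpleRanges is a hypothetical stand-in, not the dom::TimeRanges API.

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct SimpleRanges {
  std::vector<std::pair<double, double>> mRanges;

  void Add(double aStart, double aEnd) { mRanges.push_back({aStart, aEnd}); }

  // Sort and merge overlapping or touching ranges, similar in spirit to
  // dom::TimeRanges::Normalize().
  void Normalize()
  {
    std::sort(mRanges.begin(), mRanges.end());
    std::vector<std::pair<double, double>> merged;
    for (const auto& r : mRanges) {
      if (!merged.empty() && r.first <= merged.back().second) {
        merged.back().second = std::max(merged.back().second, r.second);
      } else {
        merged.push_back(r);
      }
    }
    mRanges.swap(merged);
  }
};

int main()
{
  SimpleRanges ranges;
  ranges.Add(0.0, 4.0);
  ranges.Add(5.0, 9.5);            // last buffered range ends at 9.5
  double highestEndTime = 12.0;    // highest end time across all track buffers
  ranges.Add(9.5, highestEndTime); // extend towards highestEndTime...
  ranges.Normalize();              // ...and let Normalize() merge it in.
  for (const auto& r : ranges.mRanges) {
    std::printf("[%.1f, %.1f] ", r.first, r.second);  // [0.0, 4.0] [5.0, 12.0]
  }
  std::printf("\n");
}
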
void
MediaSourceReader::OnVideoNotDecoded(NotDecodedReason aReason)
{
  MSE_DEBUG("MediaSourceReader(%p)::OnVideoNotDecoded aReason=%u IsEnded: %d", this, aReason, IsEnded());
  if (aReason == DECODE_ERROR || aReason == CANCELED) {
    mVideoPromise.Reject(aReason, __func__);
    return;
  }

  // End of stream. Force switching past this stream to another reader by
  // switching to the end of the buffered range.
  MOZ_ASSERT(aReason == END_OF_STREAM);
  if (mVideoReader) {
    AdjustEndTime(&mLastVideoTime, mVideoReader);
  }

  // See if we can find a different reader that can pick up where we left off. We use the
  // EOS_FUZZ_US to allow for the fact that our end time can be inaccurate due to bug
  // 1065207.
  if (SwitchVideoReader(mLastVideoTime + EOS_FUZZ_US)) {
    mVideoReader->RequestVideoData(false, 0)
                ->Then(GetTaskQueue(), __func__, this,
                       &MediaSourceReader::OnVideoDecoded,
                       &MediaSourceReader::OnVideoNotDecoded);
    return;
  }

  // If the entire MediaSource is done, generate an EndOfStream.
  if (IsEnded()) {
    mVideoPromise.Reject(END_OF_STREAM, __func__);
    return;
  }

  // We don't have the data the caller wants. Tell the caller that we're
  // waiting for JS to give us more data.
  mVideoPromise.Reject(WAITING_FOR_DATA, __func__);
}