// Implements the MSE SourceBuffer.buffered attribute: returns the cached
// TimeRanges, refreshing it only when the track buffers' intersection of
// buffered data has actually changed.
TimeRanges*
SourceBuffer::GetBuffered(ErrorResult& aRv)
{
  MOZ_ASSERT(NS_IsMainThread());
  // http://w3c.github.io/media-source/index.html#widl-SourceBuffer-buffered
  // 1. If this object has been removed from the sourceBuffers attribute of
  //    the parent media source then throw an InvalidStateError exception and
  //    abort these steps.
  if (!IsAttached()) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return nullptr;
  }
  media::TimeIntervals intersection = mTrackBuffersManager->Buffered();
  MSE_DEBUGV("intersection=%s", DumpTimeRanges(intersection).get());
  // Compare against the cached value (when one exists) to decide whether the
  // attribute object must be replaced.
  bool mustRefresh = true;
  if (mBuffered) {
    media::TimeIntervals cached(mBuffered);
    mustRefresh = cached != intersection;
    MSE_DEBUGV("currentValue=%s", DumpTimeRanges(cached).get());
  }
  // 5. If intersection ranges does not contain the exact same range
  //    information as the current value of this attribute, then update the
  //    current value of this attribute to intersection ranges.
  if (mustRefresh) {
    mBuffered = new TimeRanges(ToSupports(this));
    intersection.ToTimeRanges(mBuffered);
  }
  // 6. Return the current value of this attribute.
  return mBuffered;
}
// Returns the newest SourceBufferDecoder whose buffered range contains
// aTarget (within aTolerance), or nullptr when none covers it.
already_AddRefed<SourceBufferDecoder>
MediaSourceDecoder::SelectDecoder(int64_t aTarget,
                                  int64_t aTolerance,
                                  const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
{
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  // Walk the decoders newest-first, as a newer decoder providing a given
  // buffered range is expected to replace an older one.
  for (int32_t i = aTrackDecoders.Length() - 1; i >= 0; --i) {
    nsRefPtr<SourceBufferDecoder> candidate = aTrackDecoders[i];
    nsRefPtr<dom::TimeRanges> buffered = new dom::TimeRanges();
    candidate->GetBuffered(buffered);
    const bool covers =
      buffered->Find(double(aTarget) / USECS_PER_S,
                     double(aTolerance) / USECS_PER_S) != dom::TimeRanges::NoIndex;
    if (covers) {
      return candidate.forget();
    }
    MSE_DEBUGV("SelectDecoder(%lld fuzz:%lld) newDecoder=%p (%d/%d) target not in ranges=%s",
               aTarget, aTolerance, candidate.get(), i+1, aTrackDecoders.Length(),
               DumpTimeRanges(buffered).get());
  }
  return nullptr;
}
// Returns the newest SourceBufferDecoder whose buffered interval contains
// aTarget, allowing aTolerance of slack toward the start of a range only.
already_AddRefed<SourceBufferDecoder>
MediaSourceDecoder::SelectDecoder(int64_t aTarget,
                                  int64_t aTolerance,
                                  const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
{
  MOZ_ASSERT(!mIsUsingFormatReader);
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());

  media::TimeUnit target{media::TimeUnit::FromMicroseconds(aTarget)};
  // aTolerance gives a slight bias toward the start of a range only.
  media::TimeUnit fuzzedTarget{media::TimeUnit::FromMicroseconds(aTarget + aTolerance)};

  // Walk the decoders newest-first, as a newer decoder providing a given
  // buffered range is expected to replace an older one.
  for (int32_t i = aTrackDecoders.Length() - 1; i >= 0; --i) {
    nsRefPtr<SourceBufferDecoder> candidate = aTrackDecoders[i];
    media::TimeIntervals buffered = candidate->GetBuffered();
    bool covered = false;
    for (uint32_t j = 0; j < buffered.Length() && !covered; j++) {
      covered = target < buffered.End(j) && fuzzedTarget >= buffered.Start(j);
    }
    if (covered) {
      return candidate.forget();
    }
    MSE_DEBUGV("SelectDecoder(%lld fuzz:%lld) newDecoder=%p (%d/%d) target not in ranges=%s",
               aTarget, aTolerance, candidate.get(), i+1, aTrackDecoders.Length(),
               DumpTimeRanges(buffered).get());
  }
  return nullptr;
}
// Callback for a decoded video sample from the active sub-reader.  Drops
// samples that precede the skip-to-keyframe threshold, tracks the last
// delivered video timestamp, and forwards the sample to the pending promise.
void MediaSourceReader::OnVideoDecoded(VideoData* aSample)
{
  MSE_DEBUGV("MediaSourceReader(%p)::OnVideoDecoded [mTime=%lld mDuration=%lld mDiscontinuity=%d]",
             this, aSample->mTime, aSample->mDuration, aSample->mDiscontinuity);
  if (mDropVideoBeforeThreshold) {
    if (aSample->mTime < mTimeThreshold) {
      // Still before the threshold: discard this sample and chain another
      // request back into these same callbacks.
      MSE_DEBUG("MediaSourceReader(%p)::OnVideoDecoded mTime=%lld < mTimeThreshold=%lld",
                this, aSample->mTime, mTimeThreshold);
      mVideoReader->RequestVideoData(false, 0)
                  ->Then(GetTaskQueue(), __func__, this,
                         &MediaSourceReader::OnVideoDecoded,
                         &MediaSourceReader::OnVideoNotDecoded);
      return;
    }
    mDropVideoBeforeThreshold = false;
  }
  // Any OnVideoDecoded callbacks received while mVideoIsSeeking must not
  // update our last used timestamp, as these are emitted by the reader we're
  // switching away from.
  if (!mVideoIsSeeking) {
    mLastVideoTime = aSample->mTime + aSample->mDuration;
  }
  mVideoPromise.Resolve(aSample, __func__);
}
// Computes the SourceBuffer.buffered attribute as the union of all decoders'
// buffered ranges; when the MediaSource has ended, the final range is
// extended out to the highest end time seen across decoders.
already_AddRefed<TimeRanges> SourceBuffer::GetBuffered(ErrorResult& aRv)
{
  MOZ_ASSERT(NS_IsMainThread());
  // Per MSE, accessing buffered after removal from the parent MediaSource's
  // sourceBuffers list throws InvalidStateError.
  if (!IsAttached()) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return nullptr;
  }
  double highestEndTime = 0;
  nsRefPtr<TimeRanges> ranges = new TimeRanges();
  // TODO: Need to adjust mDecoders so it only tracks active decoders.
  // Once we have an abstraction for track buffers, this needs to report the
  // intersection of buffered ranges within those track buffers.
  for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
    nsRefPtr<TimeRanges> r = new TimeRanges();
    mDecoders[i]->GetBuffered(r);
    if (r->Length() > 0) {
      highestEndTime = std::max(highestEndTime, r->GetEndTime());
      ranges->Union(r);
    }
  }
  if (mMediaSource->ReadyState() == MediaSourceReadyState::Ended) {
    // Set the end time on the last range to highestEndTime by adding a
    // new range spanning the current end time to highestEndTime, which
    // Normalize() will then merge with the old last range.
    // NOTE(review): if no decoder reported any data, ranges is still empty
    // here and GetEndTime()'s value for an empty list is fed to Add() —
    // confirm GetEndTime() is well-defined (e.g. returns -1) in that case.
    ranges->Add(ranges->GetEndTime(), highestEndTime);
    ranges->Normalize();
  }
  MSE_DEBUGV("SourceBuffer(%p)::GetBuffered ranges=%s", this, DumpTimeRanges(ranges).get());
  return ranges.forget();
}
// Entry point for the next-video-sample request.  Selects the source decoder
// covering mLastVideoTime, then either seeks the new sub-reader into place
// (SOURCE_NEW), reports wait/end-of-stream (SOURCE_NONE), or requests a
// sample from the current reader directly.
nsRefPtr<MediaDecoderReader::VideoDataPromise>
MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold,
                                    bool aForceDecodeAhead)
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), "No sample requests allowed while seeking");
  MOZ_DIAGNOSTIC_ASSERT(mVideoPromise.IsEmpty(), "No duplicate sample requests");
  nsRefPtr<VideoDataPromise> p = mVideoPromise.Ensure(__func__);
  MSE_DEBUGV("RequestVideoData(%d, %lld), mLastVideoTime=%lld",
             aSkipToNextKeyframe, aTimeThreshold, mLastVideoTime);
  if (!mVideoTrack) {
    MSE_DEBUG("called with no video track");
    mVideoPromise.Reject(DECODE_ERROR, __func__);
    return p;
  }
  if (aSkipToNextKeyframe) {
    // Remember the threshold and start dropping both streams' samples that
    // precede it.
    mTimeThreshold = aTimeThreshold;
    mDropAudioBeforeThreshold = true;
    mDropVideoBeforeThreshold = true;
  }
  if (IsSeeking()) {
    MSE_DEBUG("called mid-seek. Rejecting.");
    mVideoPromise.Reject(CANCELED, __func__);
    return p;
  }
  MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
  mForceVideoDecodeAhead = aForceDecodeAhead;
  SwitchSourceResult ret = SwitchVideoSource(&mLastVideoTime);
  switch (ret) {
    case SOURCE_NEW:
      // Switched decoders: reset the new reader and seek it to where we left
      // off before requesting data.
      GetVideoReader()->ResetDecode();
      mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                              ->Then(OwnerThread(), __func__, this,
                                     &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                     &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
      break;
    case SOURCE_NONE:
      if (!mLastVideoTime) {
        // This is the first call to RequestVideoData.
        // Fallback to using decoder with earliest data.
        mVideoSourceDecoder = FirstDecoder(MediaData::VIDEO_DATA);
      }
      if (mLastVideoTime || !mVideoSourceDecoder) {
        CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
        break;
      }
      // Fallback to getting first frame from first decoder.
      // NB: deliberate fall-through into the default case below.
    default:
      DoVideoRequest();
      break;
  }
  return p;
}
// Forwards the audio request to whichever sub-reader currently covers
// mLastAudioTime; signals a decode error when no audio reader exists.
void MediaSourceReader::RequestAudioData()
{
  MSE_DEBUGV("MediaSourceReader(%p)::RequestAudioData", this);
  if (mAudioReader) {
    mAudioIsSeeking = false;
    SwitchAudioReader(mLastAudioTime);
    mAudioReader->RequestAudioData();
    return;
  }
  MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called with no audio reader", this);
  GetCallback()->OnDecodeError();
}
// Forwards the audio request to whichever sub-reader currently covers
// mLastAudioTime; reports DECODE_ERROR when no audio reader exists.
void MediaSourceReader::RequestAudioData()
{
  MSE_DEBUGV("MediaSourceReader(%p)::RequestAudioData", this);
  if (mAudioReader) {
    mAudioIsSeeking = false;
    SwitchAudioReader(mLastAudioTime);
    mAudioReader->RequestAudioData();
    return;
  }
  MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called with no audio reader", this);
  GetCallback()->OnNotDecoded(MediaData::AUDIO_DATA, RequestSampleCallback::DECODE_ERROR);
}
// Switches mVideoReader to the decoder reader covering aTarget, if any.
// Returns true when the active reader actually changed.
bool MediaSourceReader::SwitchVideoReader(int64_t aTarget)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  // XXX: Can't handle adding a video track after ReadMetadata.
  if (!mVideoTrack) {
    return false;
  }
  nsRefPtr<MediaDecoderReader> newReader = SelectReader(aTarget, mVideoTrack->Decoders());
  if (newReader && newReader != mVideoReader) {
    // Idle the reader we're switching away from.  Null-check it first: the
    // previous code dereferenced mVideoReader unconditionally, which would
    // crash on the very first switch when no reader had been selected yet.
    if (mVideoReader) {
      mVideoReader->SetIdle();
    }
    mVideoReader = newReader;
    MSE_DEBUGV("MediaSourceReader(%p)::SwitchVideoReader switched reader to %p",
               this, mVideoReader.get());
    return true;
  }
  return false;
}
// Entry point for the next-audio-sample request.  Selects the source decoder
// covering mLastAudioTime, then either seeks the new sub-reader into place
// (SOURCE_NEW), reports wait/end-of-stream (SOURCE_NONE), or requests a
// sample from the current reader directly.
nsRefPtr<MediaDecoderReader::AudioDataPromise>
MediaSourceReader::RequestAudioData()
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), "No sample requests allowed while seeking");
  MOZ_DIAGNOSTIC_ASSERT(mAudioPromise.IsEmpty(), "No duplicate sample requests");
  nsRefPtr<AudioDataPromise> p = mAudioPromise.Ensure(__func__);
  MSE_DEBUGV("mLastAudioTime=%lld", mLastAudioTime);
  if (!mAudioTrack) {
    MSE_DEBUG("called with no audio track");
    mAudioPromise.Reject(DECODE_ERROR, __func__);
    return p;
  }
  if (IsSeeking()) {
    MSE_DEBUG("called mid-seek. Rejecting.");
    mAudioPromise.Reject(CANCELED, __func__);
    return p;
  }
  MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists());
  SwitchSourceResult ret = SwitchAudioSource(&mLastAudioTime);
  switch (ret) {
    case SOURCE_NEW:
      // Switched decoders: reset the new reader and seek it to where we left
      // off before requesting data.
      GetAudioReader()->ResetDecode();
      mAudioSeekRequest.Begin(GetAudioReader()->Seek(GetReaderAudioTime(mLastAudioTime), 0)
                              ->Then(OwnerThread(), __func__, this,
                                     &MediaSourceReader::CompleteAudioSeekAndDoRequest,
                                     &MediaSourceReader::CompleteAudioSeekAndRejectPromise));
      break;
    case SOURCE_NONE:
      if (!mLastAudioTime) {
        // This is the first call to RequestAudioData.
        // Fallback to using decoder with earliest data.
        mAudioSourceDecoder = FirstDecoder(MediaData::AUDIO_DATA);
      }
      if (mLastAudioTime || !mAudioSourceDecoder) {
        CheckForWaitOrEndOfStream(MediaData::AUDIO_DATA, mLastAudioTime);
        break;
      }
      // Fallback to getting first frame from first decoder.
      // NB: deliberate fall-through into the default case below.
    default:
      DoAudioRequest();
      break;
  }
  return p;
}
// Promise-returning audio request: switches to the sub-reader covering
// mLastAudioTime and chains its response into our decode callbacks.
nsRefPtr<MediaDecoderReader::AudioDataPromise>
MediaSourceReader::RequestAudioData()
{
  nsRefPtr<AudioDataPromise> promise = mAudioPromise.Ensure(__func__);
  MSE_DEBUGV("MediaSourceReader(%p)::RequestAudioData", this);
  if (!mAudioReader) {
    MSE_DEBUG("MediaSourceReader(%p)::RequestAudioData called with no audio reader", this);
    mAudioPromise.Reject(DECODE_ERROR, __func__);
  } else {
    mAudioIsSeeking = false;
    SwitchAudioReader(mLastAudioTime);
    mAudioReader->RequestAudioData()
                ->Then(GetTaskQueue(), __func__, this,
                       &MediaSourceReader::OnAudioDecoded,
                       &MediaSourceReader::OnAudioNotDecoded);
  }
  return promise;
}
// Picks the source decoder whose buffered range contains *aTarget and
// records it in mVideoSourceDecoder.  Returns whether the chosen decoder is
// new, the existing one, or absent.  May advance *aTarget when only the
// fuzzy search matched and the target fell shortly before the range start.
MediaSourceReader::SwitchSourceResult
MediaSourceReader::SwitchVideoSource(int64_t* aTarget)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  // XXX: Can't handle adding a video track after ReadMetadata.
  if (!mVideoTrack) {
    return SOURCE_NONE;
  }
  // We first search without the tolerance and then search with it, so that, in
  // the case of perfectly-aligned data, we don't prematurely jump to a new
  // reader and skip the last few samples of the current one.
  bool usedFuzz = false;
  nsRefPtr<SourceBufferDecoder> newDecoder =
    SelectDecoder(*aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
  if (!newDecoder) {
    newDecoder = SelectDecoder(*aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
    usedFuzz = true;
  }
  // Idle the current reader whenever we're moving off it — including when no
  // replacement decoder was found at all.
  if (GetVideoReader() && mVideoSourceDecoder != newDecoder) {
    GetVideoReader()->SetIdle();
  }
  if (!newDecoder) {
    mVideoSourceDecoder = nullptr;
    return SOURCE_NONE;
  }
  if (newDecoder == mVideoSourceDecoder) {
    return SOURCE_EXISTING;
  }
  mVideoSourceDecoder = newDecoder;
  if (usedFuzz) {
    // A decoder buffered range is continuous. We would have failed the exact
    // search but succeeded the fuzzy one if our target was shortly before
    // start time.
    nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
    newDecoder->GetBuffered(ranges);
    // Snap the target forward to the range start so the subsequent seek lands
    // inside buffered data.
    int64_t startTime = ranges->GetStartTime() * USECS_PER_S;
    if (*aTarget < startTime) {
      *aTarget = startTime;
    }
  }
  MSE_DEBUGV("switched decoder to %p (fuzz:%d)", mVideoSourceDecoder.get(), usedFuzz);
  return SOURCE_NEW;
}
AtomParser(const nsACString& aType, const MediaLargeByteBuffer* aData) { const nsCString mType(aType); // for logging macro. mp4_demuxer::ByteReader reader(aData); mp4_demuxer::AtomType initAtom("ftyp"); mp4_demuxer::AtomType mediaAtom("moof"); while (reader.Remaining() >= 8) { uint64_t size = reader.ReadU32(); const uint8_t* typec = reader.Peek(4); uint32_t type = reader.ReadU32(); MSE_DEBUGV(AtomParser ,"Checking atom:'%c%c%c%c'", typec[0], typec[1], typec[2], typec[3]); if (mInitOffset.isNothing() && mp4_demuxer::AtomType(type) == initAtom) { mInitOffset = Some(reader.Offset()); } if (mMediaOffset.isNothing() && mp4_demuxer::AtomType(type) == mediaAtom) { mMediaOffset = Some(reader.Offset()); } if (mInitOffset.isSome() && mMediaOffset.isSome()) { // We have everything we need. break; } if (size == 1) { // 64 bits size. if (!reader.CanReadType<uint64_t>()) { break; } size = reader.ReadU64(); } else if (size == 0) { // Atom extends to the end of the buffer, it can't have what we're // looking for. break; } if (reader.Remaining() < size - 8) { // Incomplete atom. break; } reader.Read(size - 8); } reader.DiscardRemaining(); }
void MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) { MSE_DEBUGV("MediaSourceReader(%p)::RequestVideoData(%d, %lld)", this, aSkipToNextKeyframe, aTimeThreshold); if (!mVideoReader) { MSE_DEBUG("MediaSourceReader(%p)::RequestVideoData called with no video reader", this); GetCallback()->OnNotDecoded(MediaData::VIDEO_DATA, RequestSampleCallback::DECODE_ERROR); return; } if (aSkipToNextKeyframe) { mTimeThreshold = aTimeThreshold; mDropAudioBeforeThreshold = true; mDropVideoBeforeThreshold = true; } mVideoIsSeeking = false; SwitchVideoReader(mLastVideoTime); mVideoReader->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold); }
// Reports the SourceBuffer.buffered attribute from the track buffer's data;
// when the MediaSource has ended, the final range is extended out to the
// highest buffered end time.
already_AddRefed<TimeRanges>
SourceBuffer::GetBuffered(ErrorResult& aRv)
{
  MOZ_ASSERT(NS_IsMainThread());
  if (!IsAttached()) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return nullptr;
  }
  nsRefPtr<TimeRanges> buffered = new TimeRanges();
  const double highestEndTime = mTrackBuffer->Buffered(buffered);
  if (mMediaSource->ReadyState() == MediaSourceReadyState::Ended) {
    // Stretch the last range to highestEndTime: append a range from the
    // current end time to highestEndTime and let Normalize() fold it into
    // the existing final range.
    buffered->Add(buffered->GetEndTime(), highestEndTime);
    buffered->Normalize();
  }
  MSE_DEBUGV("ranges=%s", DumpTimeRanges(buffered).get());
  return buffered.forget();
}
// Callback for a decoded video sample.  Rebases the sample's timestamp by
// the source decoder's offset, drops pre-threshold samples after a
// skip-to-keyframe, carries discontinuity flags across dropped samples, and
// resolves the pending video promise with a timestamp-adjusted copy.
void MediaSourceReader::OnVideoDecoded(VideoData* aSample)
{
  MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
  mVideoRequest.Complete();
  // Adjust the sample time into our reference.
  int64_t ourTime = aSample->mTime + mVideoSourceDecoder->GetTimestampOffset();
  // Remember a discontinuity even if this particular sample gets dropped
  // below, so it can be flagged on the first sample actually delivered.
  if (aSample->mDiscontinuity) {
    mVideoDiscontinuity = true;
  }
  MSE_DEBUGV("[mTime=%lld mDuration=%lld mDiscontinuity=%d]",
             ourTime, aSample->mDuration, aSample->mDiscontinuity);
  if (mDropVideoBeforeThreshold) {
    if (ourTime < mTimeThreshold) {
      // Still before the skip threshold: discard and request another sample.
      MSE_DEBUG("mTime=%lld < mTimeThreshold=%lld", ourTime, mTimeThreshold);
      DoVideoRequest();
      return;
    }
    mDropVideoBeforeThreshold = false;
    mTimeThreshold = 0;
  }
  // Re-issue the sample as a shallow copy carrying the adjusted timestamp.
  nsRefPtr<VideoData> newSample =
    VideoData::ShallowCopyUpdateTimestampAndDuration(aSample, ourTime, aSample->mDuration);
  mLastVideoTime = newSample->GetEndTime();
  if (mVideoDiscontinuity) {
    newSample->mDiscontinuity = true;
    mVideoDiscontinuity = false;
  }
  mVideoPromise.Resolve(newSample, __func__);
}
// Callback for a decoded audio sample.  Rebases the sample's timestamp by
// the source decoder's offset, drops pre-threshold samples after a
// skip-to-keyframe, carries discontinuity flags across dropped samples, and
// resolves the pending audio promise with a timestamp-adjusted sample.
void MediaSourceReader::OnAudioDecoded(AudioData* aSample)
{
  MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
  mAudioRequest.Complete();
  // Adjust the sample time into our reference.
  int64_t ourTime = aSample->mTime + mAudioSourceDecoder->GetTimestampOffset();
  // Remember a discontinuity even if this particular sample gets dropped
  // below, so it can be flagged on the first sample actually delivered.
  if (aSample->mDiscontinuity) {
    mAudioDiscontinuity = true;
  }
  MSE_DEBUGV("[mTime=%lld mDuration=%lld mDiscontinuity=%d]",
             ourTime, aSample->mDuration, aSample->mDiscontinuity);
  if (mDropAudioBeforeThreshold) {
    if (ourTime < mTimeThreshold) {
      // Still before the skip threshold: discard and request another sample,
      // chaining back into these same callbacks.
      MSE_DEBUG("mTime=%lld < mTimeThreshold=%lld", ourTime, mTimeThreshold);
      mAudioRequest.Begin(GetAudioReader()->RequestAudioData()
                          ->RefableThen(GetTaskQueue(), __func__, this,
                                        &MediaSourceReader::OnAudioDecoded,
                                        &MediaSourceReader::OnAudioNotDecoded));
      return;
    }
    mDropAudioBeforeThreshold = false;
    // NOTE(review): unlike OnVideoDecoded, mTimeThreshold is not reset to 0
    // here — confirm whether that asymmetry is intentional.
  }
  // Adjust the sample time into our reference.
  nsRefPtr<AudioData> newSample =
    AudioData::TransferAndUpdateTimestampAndDuration(aSample, ourTime, aSample->mDuration);
  mLastAudioTime = newSample->GetEndTime();
  if (mAudioDiscontinuity) {
    newSample->mDiscontinuity = true;
    mAudioDiscontinuity = false;
  }
  mAudioPromise.Resolve(newSample, __func__);
}
// Returns the reader of the newest decoder whose buffered range contains
// aTarget, or nullptr when none covers it.  Caller must hold the decoder's
// reentrant monitor.
already_AddRefed<MediaDecoderReader>
MediaSourceReader::SelectReader(int64_t aTarget,
                                const nsTArray<nsRefPtr<SourceBufferDecoder>>& aTrackDecoders)
{
  mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
  // Walk the decoders newest-first, as a newer decoder providing a given
  // buffered range is expected to replace an older one.
  for (int32_t i = aTrackDecoders.Length() - 1; i >= 0; --i) {
    nsRefPtr<MediaDecoderReader> candidate = aTrackDecoders[i]->GetReader();
    nsRefPtr<dom::TimeRanges> buffered = new dom::TimeRanges();
    aTrackDecoders[i]->GetBuffered(buffered);
    if (buffered->Find(double(aTarget) / USECS_PER_S) != dom::TimeRanges::NoIndex) {
      return candidate.forget();
    }
    MSE_DEBUGV("MediaSourceReader(%p)::SelectReader(%lld) newReader=%p target not in ranges=%s",
               this, aTarget, candidate.get(), DumpTimeRanges(buffered).get());
  }
  return nullptr;
}
// Promise-returning video request: switches to the sub-reader covering
// mLastVideoTime and chains its response into our decode callbacks, arming
// the drop-before-threshold state when skipping to the next keyframe.
nsRefPtr<MediaDecoderReader::VideoDataPromise>
MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
{
  nsRefPtr<VideoDataPromise> promise = mVideoPromise.Ensure(__func__);
  MSE_DEBUGV("MediaSourceReader(%p)::RequestVideoData(%d, %lld)", this,
             aSkipToNextKeyframe, aTimeThreshold);
  if (!mVideoReader) {
    MSE_DEBUG("MediaSourceReader(%p)::RequestVideoData called with no video reader", this);
    mVideoPromise.Reject(DECODE_ERROR, __func__);
  } else {
    if (aSkipToNextKeyframe) {
      mTimeThreshold = aTimeThreshold;
      mDropAudioBeforeThreshold = true;
      mDropVideoBeforeThreshold = true;
    }
    mVideoIsSeeking = false;
    SwitchVideoReader(mLastVideoTime);
    mVideoReader->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold)
                ->Then(GetTaskQueue(), __func__, this,
                       &MediaSourceReader::OnVideoDecoded,
                       &MediaSourceReader::OnVideoNotDecoded);
  }
  return promise;
}
// Callback for a decoded audio sample from the active sub-reader.  Drops
// samples that precede the skip-to-keyframe threshold, tracks the last
// delivered audio timestamp, and forwards the sample to the callback.
void MediaSourceReader::OnAudioDecoded(AudioData* aSample)
{
  MSE_DEBUGV("MediaSourceReader(%p)::OnAudioDecoded [mTime=%lld mDuration=%lld mDiscontinuity=%d]",
             this, aSample->mTime, aSample->mDuration, aSample->mDiscontinuity);
  if (mDropAudioBeforeThreshold) {
    if (aSample->mTime < mTimeThreshold) {
      MSE_DEBUG("MediaSourceReader(%p)::OnAudioDecoded mTime=%lld < mTimeThreshold=%lld",
                this, aSample->mTime, mTimeThreshold);
      // Free the dropped sample before requesting the next one.
      // NOTE(review): this assumes the callback receives raw ownership of
      // aSample rather than a refcounted object — confirm against the
      // decode-callback contract before relying on it.
      delete aSample;
      mAudioReader->RequestAudioData();
      return;
    }
    mDropAudioBeforeThreshold = false;
  }
  // Any OnAudioDecoded callbacks received while mAudioIsSeeking must not
  // update our last used timestamp, as these are emitted by the reader we're
  // switching away from.
  if (!mAudioIsSeeking) {
    mLastAudioTime = aSample->mTime + aSample->mDuration;
  }
  GetCallback()->OnAudioDecoded(aSample);
}