void MediaSourceReader::ReleaseMediaResources() { ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); if (GetVideoReader()) { GetVideoReader()->ReleaseMediaResources(); } }
size_t
MediaSourceReader::SizeOfVideoQueueInFrames()
{
  // With no active video reader there is nothing queued to count.
  auto* videoReader = GetVideoReader();
  if (!videoReader) {
    MSE_DEBUG("called with no video reader");
    return 0;
  }
  return videoReader->SizeOfVideoQueueInFrames();
}
// Request the next decoded video frame.
//
// aSkipToNextKeyframe - when true, record aTimeThreshold and arm the
//                       drop-before-threshold flags for both tracks.
// aTimeThreshold      - threshold time (microseconds) used when skipping.
// aForceDecodeAhead   - forwarded to the underlying reader via
//                       mForceVideoDecodeAhead.
// Returns a promise that is resolved with a sample or rejected
// (DECODE_ERROR when no video track exists, CANCELED mid-seek).
nsRefPtr<MediaDecoderReader::VideoDataPromise>
MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe,
                                    int64_t aTimeThreshold,
                                    bool aForceDecodeAhead)
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), "No sample requests allowed while seeking");
  MOZ_DIAGNOSTIC_ASSERT(mVideoPromise.IsEmpty(), "No duplicate sample requests");
  nsRefPtr<VideoDataPromise> p = mVideoPromise.Ensure(__func__);
  MSE_DEBUGV("RequestVideoData(%d, %lld), mLastVideoTime=%lld",
             aSkipToNextKeyframe, aTimeThreshold, mLastVideoTime);
  if (!mVideoTrack) {
    // No video track was ever attached; fail the request outright.
    MSE_DEBUG("called with no video track");
    mVideoPromise.Reject(DECODE_ERROR, __func__);
    return p;
  }
  if (aSkipToNextKeyframe) {
    // Remember the threshold so already-decoded samples before it get dropped.
    mTimeThreshold = aTimeThreshold;
    mDropAudioBeforeThreshold = true;
    mDropVideoBeforeThreshold = true;
  }
  if (IsSeeking()) {
    MSE_DEBUG("called mid-seek. Rejecting.");
    mVideoPromise.Reject(CANCELED, __func__);
    return p;
  }
  MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
  mForceVideoDecodeAhead = aForceDecodeAhead;
  // Possibly switch to a different SourceBufferDecoder whose buffered range
  // covers mLastVideoTime; this may also clamp mLastVideoTime forward.
  SwitchSourceResult ret = SwitchVideoSource(&mLastVideoTime);
  switch (ret) {
    case SOURCE_NEW:
      // A new reader was selected: reset it, then seek it to where the
      // previous reader left off before requesting data.
      GetVideoReader()->ResetDecode();
      mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                              ->Then(OwnerThread(), __func__, this,
                                     &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                     &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
      break;
    case SOURCE_NONE:
      if (!mLastVideoTime) {
        // This is the first call to RequestVideoData.
        // Fallback to using decoder with earliest data.
        mVideoSourceDecoder = FirstDecoder(MediaData::VIDEO_DATA);
      }
      if (mLastVideoTime || !mVideoSourceDecoder) {
        // No buffered data at the current position: wait for data or signal
        // end of stream as appropriate.
        CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
        break;
      }
      // Fallback to getting first frame from first decoder.
      // NOTE: deliberate fall through into the default case below.
    default:
      DoVideoRequest();
      break;
  }
  return p;
}
bool MediaSourceReader::IsDormantNeeded() { ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); if (GetVideoReader()) { return GetVideoReader()->IsDormantNeeded(); } return false; }
// Populate *aInfo from the first decoder of each ready track buffer.
// Fails when neither an audio nor a video track has been set up. A combined
// audio/video source buffer is unsupported: only the video track is kept.
// *aTags is always set to nullptr (metadata tags not handled yet).
nsresult
MediaSourceReader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MSE_DEBUG("tracks=%u/%u audio=%p video=%p",
            mEssentialTrackBuffers.Length(), mTrackBuffers.Length(),
            mAudioTrack.get(), mVideoTrack.get());
  mEssentialTrackBuffers.Clear();
  if (!mAudioTrack && !mVideoTrack) {
    MSE_DEBUG("missing track: mAudioTrack=%p mVideoTrack=%p",
              mAudioTrack.get(), mVideoTrack.get());
    return NS_ERROR_FAILURE;
  }
  if (mAudioTrack == mVideoTrack) {
    // Same TrackBuffer registered for both tracks: drop the audio alias and
    // keep only the video side.
    NS_WARNING("Combined audio/video sourcebuffer, this is an unsupported "
               "configuration, only using video track");
    mAudioTrack = nullptr;
  }
  if (mAudioTrack) {
    MOZ_ASSERT(mAudioTrack->IsReady());
    // The first decoder of a ready track carries the track's media info.
    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
    const MediaInfo& info = GetAudioReader()->GetMediaInfo();
    MOZ_ASSERT(info.HasAudio());
    mInfo.mAudio = info.mAudio;
    mInfo.mCrypto.AddInitData(info.mCrypto);
    MSE_DEBUG("audio reader=%p duration=%lld",
              mAudioSourceDecoder.get(),
              mInfo.mMetadataDuration.isSome()
                ? mInfo.mMetadataDuration.ref().ToMicroseconds() : -1);
  }
  if (mVideoTrack) {
    MOZ_ASSERT(mVideoTrack->IsReady());
    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
    const MediaInfo& info = GetVideoReader()->GetMediaInfo();
    MOZ_ASSERT(info.HasVideo());
    mInfo.mVideo = info.mVideo;
    mInfo.mCrypto.AddInitData(info.mCrypto);
    MSE_DEBUG("video reader=%p duration=%lld",
              GetVideoReader(),
              mInfo.mMetadataDuration.isSome()
                ? mInfo.mMetadataDuration.ref().ToMicroseconds() : -1);
  }
  *aInfo = mInfo;
  *aTags = nullptr; // TODO: Handle metadata.
  return NS_OK;
}
// Handle a failed/empty video decode. Depending on aReason this either
// propagates a cancellation, switches to another source reader that can pick
// up where this one left off, rejects with a genuine decode error, or waits
// for more data / end of stream.
void
MediaSourceReader::OnVideoNotDecoded(NotDecodedReason aReason)
{
  MOZ_DIAGNOSTIC_ASSERT(!IsSeeking());
  mVideoRequest.Complete();
  MSE_DEBUG("aReason=%u IsEnded: %d", aReason, IsEnded());
  if (aReason == CANCELED) {
    mVideoPromise.Reject(CANCELED, __func__);
    return;
  }
  // if End of stream. Force switching past this stream to another reader by
  // switching to the end of the buffered range.
  // Keep the pre-adjustment position so it can be restored below if no other
  // reader is found.
  int64_t lastVideoTime = mLastVideoTime;
  if (aReason == END_OF_STREAM && mVideoSourceDecoder) {
    AdjustEndTime(&mLastVideoTime, mVideoSourceDecoder);
  }
  // See if we can find a different reader that can pick up where we left off.
  SwitchSourceResult result = SwitchVideoSource(&mLastVideoTime);
  if (result == SOURCE_NEW) {
    // New reader found: reset it and seek it to the resume position, then
    // re-issue the sample request from the seek completion callback.
    GetVideoReader()->ResetDecode();
    mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                            ->RefableThen(GetTaskQueue(), __func__, this,
                                          &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                          &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
    return;
  }
  // If we got a DECODE_ERROR and we have buffered data in the requested range
  // then it must be a genuine decoding error.
  // Otherwise we can assume that the data was either evicted or explicitly
  // removed from the source buffer and we should wait for new data.
  if (aReason == DECODE_ERROR && result != SOURCE_NONE) {
    mVideoPromise.Reject(DECODE_ERROR, __func__);
    return;
  }
  CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
  if (mLastVideoTime - lastVideoTime >= EOS_FUZZ_US) {
    // No decoders are available to switch to. We will re-attempt from the last
    // failing position.
    mLastVideoTime = lastVideoTime;
  }
}
// Append a human-readable dump of both track buffers to aString: for every
// decoder, its reader pointer, buffered ranges, whether it is the active
// reader, and the size of its backing resource. Used for about:media-style
// debugging output.
//
// Fix: the original loops initialized `int32_t i = Decoders().Length() - 1`.
// Length() returns an unsigned value, so an empty list wrapped to UINT32_MAX
// before an implementation-defined conversion to -1. Take the count as a
// signed value first so the empty case is handled without relying on wrap.
void
MediaSourceReader::GetMozDebugReaderData(nsAString& aString)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  nsAutoCString result;
  result += nsPrintfCString("Dumping data for reader %p:\n", this);
  if (mAudioTrack) {
    result += nsPrintfCString("\tDumping Audio Track Decoders: - mLastAudioTime: %f\n",
                              double(mLastAudioTime) / USECS_PER_S);
    int32_t audioCount = static_cast<int32_t>(mAudioTrack->Decoders().Length());
    for (int32_t i = audioCount - 1; i >= 0; --i) {
      nsRefPtr<MediaDecoderReader> newReader = mAudioTrack->Decoders()[i]->GetReader();
      nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
      mAudioTrack->Decoders()[i]->GetBuffered(ranges);
      result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
                                i, newReader.get(), DumpTimeRanges(ranges).get(),
                                newReader.get() == GetAudioReader() ? "true" : "false",
                                mAudioTrack->Decoders()[i]->GetResource()->GetSize());
    }
  }
  if (mVideoTrack) {
    result += nsPrintfCString("\tDumping Video Track Decoders - mLastVideoTime: %f\n",
                              double(mLastVideoTime) / USECS_PER_S);
    int32_t videoCount = static_cast<int32_t>(mVideoTrack->Decoders().Length());
    for (int32_t i = videoCount - 1; i >= 0; --i) {
      nsRefPtr<MediaDecoderReader> newReader = mVideoTrack->Decoders()[i]->GetReader();
      nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
      mVideoTrack->Decoders()[i]->GetBuffered(ranges);
      result += nsPrintfCString("\t\tReader %d: %p ranges=%s active=%s size=%lld\n",
                                i, newReader.get(), DumpTimeRanges(ranges).get(),
                                newReader.get() == GetVideoReader() ? "true" : "false",
                                mVideoTrack->Decoders()[i]->GetResource()->GetSize());
    }
  }
  aString += NS_ConvertUTF8toUTF16(result);
}
// Populate *aInfo from the first decoder of each ready track buffer.
// Fails when neither an audio nor a video track has been set up.
// *aTags is always set to nullptr (metadata tags not handled yet).
// NOTE(review): this revision logs durations via the decoder's
// GetMediaDuration() rather than mInfo.mMetadataDuration.
nsresult
MediaSourceReader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MSE_DEBUG("tracks=%u/%u audio=%p video=%p",
            mEssentialTrackBuffers.Length(), mTrackBuffers.Length(),
            mAudioTrack.get(), mVideoTrack.get());
  mEssentialTrackBuffers.Clear();
  if (!mAudioTrack && !mVideoTrack) {
    MSE_DEBUG("missing track: mAudioTrack=%p mVideoTrack=%p",
              mAudioTrack.get(), mVideoTrack.get());
    return NS_ERROR_FAILURE;
  }
  if (mAudioTrack) {
    MOZ_ASSERT(mAudioTrack->IsReady());
    // The first decoder of a ready track carries the track's media info.
    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
    const MediaInfo& info = GetAudioReader()->GetMediaInfo();
    MOZ_ASSERT(info.HasAudio());
    mInfo.mAudio = info.mAudio;
    mInfo.mCrypto.AddInitData(info.mCrypto);
    MSE_DEBUG("audio reader=%p duration=%lld",
              mAudioSourceDecoder.get(),
              mAudioSourceDecoder->GetReader()->GetDecoder()->GetMediaDuration());
  }
  if (mVideoTrack) {
    MOZ_ASSERT(mVideoTrack->IsReady());
    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
    const MediaInfo& info = GetVideoReader()->GetMediaInfo();
    MOZ_ASSERT(info.HasVideo());
    mInfo.mVideo = info.mVideo;
    mInfo.mCrypto.AddInitData(info.mCrypto);
    MSE_DEBUG("video reader=%p duration=%lld",
              GetVideoReader(),
              GetVideoReader()->GetDecoder()->GetMediaDuration());
  }
  *aInfo = mInfo;
  *aTags = nullptr; // TODO: Handle metadata.
  return NS_OK;
}
void
MediaSourceReader::DoVideoRequest()
{
  // Issue a decode on the active video reader and wire the result back into
  // our OnVideoDecoded / OnVideoNotDecoded handlers on our task queue.
  auto request = GetVideoReader()->RequestVideoData(mDropVideoBeforeThreshold,
                                                    GetReaderVideoTime(mTimeThreshold));
  mVideoRequest.Begin(request->RefableThen(GetTaskQueue(), __func__, this,
                                           &MediaSourceReader::OnVideoDecoded,
                                           &MediaSourceReader::OnVideoNotDecoded));
}
// Select the SourceBufferDecoder whose buffered range contains *aTarget and
// make it the active video source. Returns SOURCE_NONE when no decoder
// covers the target, SOURCE_EXISTING when the current decoder still does,
// and SOURCE_NEW when a different decoder was selected (in which case
// *aTarget may be clamped forward to the new decoder's start time).
MediaSourceReader::SwitchSourceResult
MediaSourceReader::SwitchVideoSource(int64_t* aTarget)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  // XXX: Can't handle adding a video track after ReadMetadata.
  if (!mVideoTrack) {
    return SOURCE_NONE;
  }
  // We first search without the tolerance and then search with it, so that, in
  // the case of perfectly-aligned data, we don't prematurely jump to a new
  // reader and skip the last few samples of the current one.
  bool usedFuzz = false;
  nsRefPtr<SourceBufferDecoder> newDecoder =
    SelectDecoder(*aTarget, /* aTolerance = */ 0, mVideoTrack->Decoders());
  if (!newDecoder) {
    newDecoder = SelectDecoder(*aTarget, EOS_FUZZ_US, mVideoTrack->Decoders());
    usedFuzz = true;
  }
  // Idle the current reader whenever the selection changed — including the
  // case where nothing was found (newDecoder is null).
  if (GetVideoReader() && mVideoSourceDecoder != newDecoder) {
    GetVideoReader()->SetIdle();
  }
  if (!newDecoder) {
    mVideoSourceDecoder = nullptr;
    return SOURCE_NONE;
  }
  if (newDecoder == mVideoSourceDecoder) {
    return SOURCE_EXISTING;
  }
  mVideoSourceDecoder = newDecoder;
  if (usedFuzz) {
    // A decoder buffered range is continuous. We would have failed the exact
    // search but succeeded the fuzzy one if our target was shortly before
    // start time.
    nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
    newDecoder->GetBuffered(ranges);
    int64_t startTime = ranges->GetStartTime() * USECS_PER_S;
    if (*aTarget < startTime) {
      *aTarget = startTime;
    }
  }
  MSE_DEBUGV("switched decoder to %p (fuzz:%d)", mVideoSourceDecoder.get(), usedFuzz);
  return SOURCE_NEW;
}
void
MediaSourceReader::DoVideoRequest()
{
  // Issue a decode on the active video reader and route the outcome back
  // into our OnVideoDecoded / OnVideoNotDecoded handlers on our own thread.
  auto request = GetVideoReader()->RequestVideoData(mDropVideoBeforeThreshold,
                                                    GetReaderVideoTime(mTimeThreshold),
                                                    mForceVideoDecodeAhead);
  mVideoRequest.Begin(request->Then(OwnerThread(), __func__, this,
                                    &MediaSourceReader::OnVideoDecoded,
                                    &MediaSourceReader::OnVideoNotDecoded));
}
void MediaSourceReader::DoVideoSeek() { int64_t seekTime = mPendingSeekTime; if (mSeekToEnd) { seekTime = LastSampleTime(MediaData::VIDEO_DATA); } if (SwitchVideoSource(&seekTime) == SOURCE_NONE) { // Data we need got evicted since the last time we checked for data // availability. Abort current seek attempt. mWaitingForSeekData = true; return; } GetVideoReader()->ResetDecode(); mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(seekTime), 0) ->RefableThen(GetTaskQueue(), __func__, this, &MediaSourceReader::OnVideoSeekCompleted, &MediaSourceReader::OnVideoSeekFailed)); MSE_DEBUG("reader=%p", GetVideoReader()); }
// Abort all in-flight work: disconnect outstanding sample/seek requests,
// reject every promise we still hold, clear seek bookkeeping, and reset each
// child reader before delegating to the base class.
nsresult
MediaSourceReader::ResetDecode()
{
  MOZ_ASSERT(OnTaskQueue());
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MSE_DEBUG("");
  // Any previous requests we've been waiting on are now unwanted.
  mAudioRequest.DisconnectIfExists();
  mVideoRequest.DisconnectIfExists();
  mAudioSeekRequest.DisconnectIfExists();
  mVideoSeekRequest.DisconnectIfExists();
  // Additionally, reject any outstanding promises _we_ made that we might have
  // been waiting on the above to fulfill.
  mAudioPromise.RejectIfExists(CANCELED, __func__);
  mVideoPromise.RejectIfExists(CANCELED, __func__);
  mSeekPromise.RejectIfExists(NS_OK, __func__);
  // Do the same for any data wait promises.
  mAudioWaitPromise.RejectIfExists(WaitForDataRejectValue(MediaData::AUDIO_DATA,
                                                          WaitForDataRejectValue::CANCELED), __func__);
  mVideoWaitPromise.RejectIfExists(WaitForDataRejectValue(MediaData::VIDEO_DATA,
                                                          WaitForDataRejectValue::CANCELED), __func__);
  // Reset miscellaneous seeking state.
  mWaitingForSeekData = false;
  mPendingSeekTime = -1;
  // Reset force video decode ahead.
  mForceVideoDecodeAhead = false;
  // Reset all the readers.
  if (GetAudioReader()) {
    GetAudioReader()->ResetDecode();
  }
  if (GetVideoReader()) {
    GetVideoReader()->ResetDecode();
  }
  return MediaDecoderReader::ResetDecode();
}
void OnVideoDecoded(VideoData* aSample)
{
  // While below the skip threshold, discard the sample and immediately ask
  // the reader for the next one; otherwise deliver it to the callback.
  if (mDropVideoBeforeThreshold && aSample->mTime < mTimeThreshold) {
    MSE_DEBUG("%p MSR::OnVideoDecoded VideoData mTime %lld below mTimeThreshold %lld",
              this, aSample->mTime, mTimeThreshold);
    delete aSample;
    GetVideoReader()->RequestVideoData(false, mTimeThreshold);
    return;
  }
  if (mDropVideoBeforeThreshold) {
    // First sample at/past the threshold: stop dropping from here on.
    mDropVideoBeforeThreshold = false;
  }
  GetCallback()->OnVideoDecoded(aSample);
}
bool MaybeSwitchVideoReaders(int64_t aTimeThreshold)
{
  // Walk the decoders after the active one, looking for a video decoder
  // whose media start time has been reached; activate the first match.
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MOZ_ASSERT(mActiveVideoDecoder != -1);
  InitializePendingDecoders();
  for (uint32_t candidate = mActiveVideoDecoder + 1; candidate < mDecoders.Length(); ++candidate) {
    if (!mDecoders[candidate]->GetReader()->GetMediaInfo().HasVideo()) {
      continue;
    }
    if (aTimeThreshold < mDecoders[candidate]->GetMediaStartTime()) {
      continue;
    }
    // Idle the outgoing reader before switching over.
    GetVideoReader()->SetIdle();
    mActiveVideoDecoder = candidate;
    MSE_DEBUG("%p MSR::DecodeVF switching to %d", this, mActiveVideoDecoder);
    return true;
  }
  return false;
}
void
MediaSourceReader::ReadUpdatedMetadata(MediaInfo* aInfo)
{
  // Refresh mInfo from the first decoder of each ready track, then hand the
  // caller a copy.
  if (mAudioTrack) {
    MOZ_ASSERT(mAudioTrack->IsReady());
    mAudioSourceDecoder = mAudioTrack->Decoders()[0];
    const MediaInfo& audioInfo = GetAudioReader()->GetMediaInfo();
    MOZ_ASSERT(audioInfo.HasAudio());
    mInfo.mAudio = audioInfo.mAudio;
  }
  if (mVideoTrack) {
    MOZ_ASSERT(mVideoTrack->IsReady());
    mVideoSourceDecoder = mVideoTrack->Decoders()[0];
    const MediaInfo& videoInfo = GetVideoReader()->GetMediaInfo();
    MOZ_ASSERT(videoInfo.HasVideo());
    mInfo.mVideo = videoInfo.mVideo;
  }
  *aInfo = mInfo;
}
bool
MediaSourceReader::IsActiveReader(MediaDecoderReader* aReader)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (!aReader) {
    return false;
  }
  // A reader is active when it is the one currently decoding either track.
  return aReader == GetAudioReader() || aReader == GetVideoReader();
}