// Decodes the first sample of each active stream to discover the earliest
// presentation timestamp (used later for duration calculation). Writes that
// timestamp into aOutStartTime when found, and returns the first decoded
// video frame (or nullptr if there is none).
VideoData* MediaDecoderReader::FindStartTime(int64_t& aOutStartTime)
{
  NS_ASSERTION(mDecoder->OnStateMachineThread() || mDecoder->OnDecodeThread(),
               "Should be on state machine or decode thread.");

  int64_t earliestVideo = INT64_MAX;
  int64_t earliestAudio = INT64_MAX;
  VideoData* firstVideo = nullptr;

  if (HasVideo()) {
    firstVideo = DecodeToFirstVideoData();
    if (firstVideo) {
      earliestVideo = firstVideo->mTime;
      DECODER_LOG(PR_LOG_DEBUG,
                  ("MediaDecoderReader::FindStartTime() video=%lld",
                   earliestVideo));
    }
  }

  if (HasAudio()) {
    AudioData* firstAudio = DecodeToFirstAudioData();
    if (firstAudio) {
      earliestAudio = firstAudio->mTime;
      DECODER_LOG(PR_LOG_DEBUG,
                  ("MediaDecoderReader::FindStartTime() audio=%lld",
                   earliestAudio));
    }
  }

  // Only report a start time if at least one stream produced a sample.
  const int64_t earliest = std::min(earliestVideo, earliestAudio);
  if (earliest != INT64_MAX) {
    aOutStartTime = earliest;
  }

  return firstVideo;
}
void MediaOmxCommonReader::CheckAudioOffload() { MOZ_ASSERT(OnTaskQueue()); char offloadProp[128]; property_get("audio.offload.disable", offloadProp, "0"); bool offloadDisable = atoi(offloadProp) != 0; if (offloadDisable) { return; } sp<MediaSource> audioOffloadTrack = GetAudioOffloadTrack(); sp<MetaData> meta = audioOffloadTrack.get() ? audioOffloadTrack->getFormat() : nullptr; // Supporting audio offload only when there is no video, no streaming bool hasNoVideo = !HasVideo(); bool isNotStreaming = mDecoder->GetResource()->IsDataCachedToEndOfResource(0); // Not much benefit in trying to offload other channel types. Most of them // aren't supported and also duration would be less than a minute bool isTypeMusic = mAudioChannel == dom::AudioChannel::Content; DECODER_LOG(LogLevel::Debug, ("%s meta %p, no video %d, no streaming %d," " channel type %d", __FUNCTION__, meta.get(), hasNoVideo, isNotStreaming, mAudioChannel)); if ((meta.get()) && hasNoVideo && isNotStreaming && isTypeMusic && canOffloadStream(meta, false, false, AUDIO_STREAM_MUSIC) && !IsMonoAudioEnabled()) { DECODER_LOG(LogLevel::Debug, ("Can offload this audio stream")); mDecoder->SetPlatformCanOffloadAudio(true); } }
/** * If this is an MP3 stream, pass any new data we get to the MP3 frame parser * for duration estimation. */ void GStreamerReader::NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset) { MOZ_ASSERT(OnTaskQueue()); if (HasVideo()) { return; } if (!mMP3FrameParser.NeedsData()) { return; } nsRefPtr<MediaByteBuffer> bytes = mResource.MediaReadAt(aOffset, aLength); NS_ENSURE_TRUE_VOID(bytes); mMP3FrameParser.Parse(bytes->Elements(), aLength, aOffset); if (!mMP3FrameParser.IsMP3()) { return; } int64_t duration = mMP3FrameParser.GetDuration(); if (duration != mLastParserDuration && mUseParserDuration) { MOZ_ASSERT(mDecoder); mLastParserDuration = duration; mDecoder->DispatchUpdateEstimatedMediaDuration(mLastParserDuration); } }
void HTMLVideoElement::UpdateScreenWakeLock() { bool hidden = OwnerDoc()->Hidden(); if (mScreenWakeLock && (mPaused || hidden || !mUseScreenWakeLock)) { ErrorResult rv; mScreenWakeLock->Unlock(rv); rv.SuppressException(); mScreenWakeLock = nullptr; return; } if (!mScreenWakeLock && !mPaused && !hidden && mUseScreenWakeLock && HasVideo()) { RefPtr<power::PowerManagerService> pmService = power::PowerManagerService::GetInstance(); NS_ENSURE_TRUE_VOID(pmService); ErrorResult rv; mScreenWakeLock = pmService->NewWakeLock(NS_LITERAL_STRING("screen"), OwnerDoc()->GetInnerWindow(), rv); } }
void SeekTask::CheckIfSeekComplete() { AssertOwnerThread(); const bool videoSeekComplete = IsVideoSeekComplete(); if (HasVideo() && !videoSeekComplete) { // We haven't reached the target. Ensure we have requested another sample. if (NS_FAILED(EnsureVideoDecodeTaskQueued())) { DECODER_WARN("Failed to request video during seek"); RejectIfExist(__func__); } } const bool audioSeekComplete = IsAudioSeekComplete(); if (HasAudio() && !audioSeekComplete) { // We haven't reached the target. Ensure we have requested another sample. if (NS_FAILED(EnsureAudioDecodeTaskQueued())) { DECODER_WARN("Failed to request audio during seek"); RejectIfExist(__func__); } } SAMPLE_LOG("CheckIfSeekComplete() audioSeekComplete=%d videoSeekComplete=%d", audioSeekComplete, videoSeekComplete); if (audioSeekComplete && videoSeekComplete) { Resolve(__func__); // Call to MDSM::SeekCompleted(); } }
void MP4Reader::NotifyDataArrived(const char* aBuffer, uint32_t aLength, int64_t aOffset) { MOZ_ASSERT(NS_IsMainThread()); if (mShutdown) { return; } if (mLastSeenEnd < 0) { MonitorAutoLock mon(mDemuxerMonitor); mLastSeenEnd = mDecoder->GetResource()->GetLength(); if (mLastSeenEnd < 0) { // We dont have a length. Demuxer would have been blocking already. return; } } int64_t end = aOffset + aLength; if (end <= mLastSeenEnd) { return; } mLastSeenEnd = end; if (HasVideo()) { auto& decoder = GetDecoderData(TrackInfo::kVideoTrack); MonitorAutoLock lock(decoder.mMonitor); decoder.mDemuxEOS = false; } if (HasAudio()) { auto& decoder = GetDecoderData(TrackInfo::kAudioTrack); MonitorAutoLock lock(decoder.mMonitor); decoder.mDemuxEOS = false; } }
void MediaOmxReader::NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset) { MOZ_ASSERT(OnTaskQueue()); nsRefPtr<AbstractMediaDecoder> decoder = SafeGetDecoder(); if (!decoder) { // reader has shut down return; } if (HasVideo()) { return; } if (!mMP3FrameParser.NeedsData()) { return; } nsRefPtr<MediaByteBuffer> bytes = mDecoder->GetResource()->MediaReadAt(aOffset, aLength); NS_ENSURE_TRUE_VOID(bytes); mMP3FrameParser.Parse(bytes->Elements(), aLength, aOffset); if (!mMP3FrameParser.IsMP3()) { return; } int64_t duration = mMP3FrameParser.GetDuration(); if (duration != mLastParserDuration) { mLastParserDuration = duration; decoder->DispatchUpdateEstimatedMediaDuration(mLastParserDuration); } }
bool MP4Reader::EnsureDecodersSetup() { if (CheckIfDecoderSetup()) { return true; } if (mIsEncrypted) { // EME not supported. return false; } else { // mPlatform doesn't need to be recreated when resuming from dormant. if (!mPlatform) { mPlatform = PlatformDecoderModule::Create(); NS_ENSURE_TRUE(mPlatform, false); } } if (HasAudio()) { NS_ENSURE_TRUE(IsSupportedAudioMimeType(mDemuxer->AudioConfig().mMimeType), false); mAudio.mDecoder = mPlatform->CreateDecoder(mDemuxer->AudioConfig(), mAudio.mTaskQueue, mAudio.mCallback); NS_ENSURE_TRUE(mAudio.mDecoder != nullptr, false); nsresult rv = mAudio.mDecoder->Init(); NS_ENSURE_SUCCESS(rv, false); } if (HasVideo()) { NS_ENSURE_TRUE(IsSupportedVideoMimeType(mDemuxer->VideoConfig().mMimeType), false); if (mSharedDecoderManager && mPlatform->SupportsSharedDecoders(mDemuxer->VideoConfig())) { mVideo.mDecoder = mSharedDecoderManager->CreateVideoDecoder(mPlatform, mDemuxer->VideoConfig(), mLayersBackendType, mDecoder->GetImageContainer(), mVideo.mTaskQueue, mVideo.mCallback); } else { mVideo.mDecoder = mPlatform->CreateDecoder(mDemuxer->VideoConfig(), mVideo.mTaskQueue, mVideo.mCallback, mLayersBackendType, mDecoder->GetImageContainer()); } NS_ENSURE_TRUE(mVideo.mDecoder != nullptr, false); nsresult rv = mVideo.mDecoder->Init(); NS_ENSURE_SUCCESS(rv, false); } return true; }
// Returns true when the video stream has finished seeking: either there is
// no video at all, or the seek still exists, we are past the discontinuity
// drop phase, and the queue is finished or a seeked frame is available.
// NOTE(review): the log prints mSeekJob.Exists() while the return value
// tests Exists() — confirm these are intended to be the same predicate.
bool SeekTask::IsVideoSeekComplete()
{
  AssertOwnerThread();
  SAMPLE_LOG("IsVideoSeekComplete() curTarVal=%d mVidDis=%d vqFin=%d vqSz=%d",
             mSeekJob.Exists(), mDropVideoUntilNextDiscontinuity,
             mIsVideoQueueFinished, !!mSeekedVideoData);
  return
    !HasVideo() ||
    (Exists() && !mDropVideoUntilNextDiscontinuity &&
     (mIsVideoQueueFinished || mSeekedVideoData));
}
// Reports whether decoder setup is complete: the demuxer must be
// initialized and every active track must already have a decoder.
bool MP4Reader::CheckIfDecoderSetup()
{
  return mDemuxerInitialized &&
         !(HasAudio() && !mAudio.mDecoder) &&
         !(HasVideo() && !mVideo.mDecoder);
}
void MP4Reader::DisableHardwareAcceleration() { if (HasVideo() && mSharedDecoderManager) { mSharedDecoderManager->DisableHardwareAcceleration(); const VideoInfo& video = mDemuxer->VideoConfig(); if (!mSharedDecoderManager->Recreate(video)) { MonitorAutoLock mon(mVideo.mMonitor); mVideo.mError = true; if (mVideo.HasPromise()) { mVideo.RejectPromise(DECODE_ERROR, __func__); } } else { MonitorAutoLock lock(mVideo.mMonitor); ScheduleUpdate(TrackInfo::kVideoTrack); } } }
void MediaOmxReader::NotifyDataArrivedInternal() { MOZ_ASSERT(OnTaskQueue()); RefPtr<AbstractMediaDecoder> decoder = SafeGetDecoder(); if (!decoder) { // reader has shut down return; } if (HasVideo()) { return; } if (!mMP3FrameParser.NeedsData()) { return; } AutoPinned<MediaResource> resource(mDecoder->GetResource()); MediaByteRangeSet byteRanges; nsresult rv = resource->GetCachedRanges(byteRanges); if (NS_FAILED(rv)) { return; } if (byteRanges == mLastCachedRanges) { return; } MediaByteRangeSet intervals = byteRanges - mLastCachedRanges; mLastCachedRanges = byteRanges; for (const auto& interval : intervals) { RefPtr<MediaByteBuffer> bytes = resource->MediaReadAt(interval.mStart, interval.Length()); NS_ENSURE_TRUE_VOID(bytes); mMP3FrameParser.Parse(bytes->Elements(), interval.Length(), interval.mStart); if (!mMP3FrameParser.IsMP3()) { return; } } int64_t duration = mMP3FrameParser.GetDuration(); if (duration != mLastParserDuration) { mLastParserDuration = duration; decoder->DispatchUpdateEstimatedMediaDuration(mLastParserDuration); } }
void MP4Reader::DisableHardwareAcceleration() { if (HasVideo() && mSharedDecoderManager) { mSharedDecoderManager->DisableHardwareAcceleration(); const VideoDecoderConfig& video = mDemuxer->VideoConfig(); if (!mSharedDecoderManager->Recreate(video, mLayersBackendType, mDecoder->GetImageContainer())) { MonitorAutoLock mon(mVideo.mMonitor); mVideo.mError = true; if (mVideo.HasPromise()) { mVideo.RejectPromise(DECODE_ERROR, __func__); } } else { MonitorAutoLock lock(mVideo.mMonitor); ScheduleUpdate(kVideo); } } }
// Feeds newly-arrived bytes to the MP3 frame parser (audio-only streams)
// and, under the decoder monitor, pushes an updated duration estimate when
// parser-based duration is in use.
void MediaOmxReader::NotifyDataArrived(const char* aBuffer, uint32_t aLength,
                                       int64_t aOffset)
{
  MOZ_ASSERT(NS_IsMainThread());

  // Parser-based duration only applies to audio-only streams, and only
  // while the parser still wants more input.
  if (HasVideo() || !mMP3FrameParser.NeedsData()) {
    return;
  }

  mMP3FrameParser.Parse(aBuffer, aLength, aOffset);
  const int64_t estimated = mMP3FrameParser.GetDuration();

  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (estimated != mLastParserDuration && mUseParserDuration) {
    mLastParserDuration = estimated;
    mDecoder->UpdateEstimatedMediaDuration(mLastParserDuration);
  }
}
// Requests the next decoded video frame, optionally skipping demuxed frames
// up to the next keyframe at/after aTimeThreshold. Returns a promise that is
// resolved with a frame or rejected on error/EOS/cancellation.
//
// Fix: check mShutdown BEFORE EnsureDecodersSetup() so we never construct
// decoders for a reader that is already being torn down.
nsRefPtr<MediaDecoderReader::VideoDataPromise>
MP4Reader::RequestVideoData(bool aSkipToNextKeyframe,
                            int64_t aTimeThreshold)
{
  MOZ_ASSERT(GetTaskQueue()->IsCurrentThreadIn());
  VLOG("skip=%d time=%lld", aSkipToNextKeyframe, aTimeThreshold);

  if (mShutdown) {
    NS_WARNING("RequestVideoData on shutdown MP4Reader!");
    return VideoDataPromise::CreateAndReject(CANCELED, __func__);
  }

  if (!EnsureDecodersSetup()) {
    NS_WARNING("Error constructing MP4 decoders");
    return VideoDataPromise::CreateAndReject(DECODE_ERROR, __func__);
  }

  MOZ_ASSERT(HasVideo() && mPlatform && mVideo.mDecoder);

  bool eos = false;
  if (ShouldSkip(aSkipToNextKeyframe, aTimeThreshold)) {
    uint32_t parsed = 0;
    eos = !SkipVideoDemuxToNextKeyFrame(aTimeThreshold, parsed);
    if (!eos && NS_FAILED(mVideo.mDecoder->Flush())) {
      NS_WARNING("Failed to skip/flush video when skipping-to-next-keyframe.");
    }
    // Skipped frames still count as parsed (and dropped) for stats.
    mDecoder->NotifyDecodedFrames(parsed, 0, parsed);
  }

  MonitorAutoLock lock(mVideo.mMonitor);
  nsRefPtr<VideoDataPromise> p = mVideo.mPromise.Ensure(__func__);
  if (mVideo.mError) {
    mVideo.mPromise.Reject(DECODE_ERROR, __func__);
  } else if (eos) {
    mVideo.mPromise.Reject(END_OF_STREAM, __func__);
  } else {
    ScheduleUpdate(kVideo);
  }

  return p;
}
// Constructs a seek task: caches stream properties from aInfo, clamps the
// requested seek target into [0, duration], arms the discontinuity-drop
// flags for each present stream, and hooks up the reader wrapper callbacks.
SeekTask::SeekTask(const void* aDecoderID,
                   AbstractThread* aThread,
                   MediaDecoderReaderWrapper* aReader,
                   SeekJob&& aSeekJob,
                   const MediaInfo& aInfo,
                   const media::TimeUnit& aDuration,
                   int64_t aCurrentMediaTime)
  : mDecoderID(aDecoderID)
  , mOwnerThread(aThread)
  , mReader(aReader)
  , mSeekJob(Move(aSeekJob))
  , mCurrentTimeBeforeSeek(aCurrentMediaTime)
  , mAudioRate(aInfo.mAudio.mRate)
  , mHasAudio(aInfo.HasAudio())
  , mHasVideo(aInfo.HasVideo())
  , mDropAudioUntilNextDiscontinuity(false)
  , mDropVideoUntilNextDiscontinuity(false)
  , mIsDiscarded(false)
  , mIsAudioQueueFinished(false)
  , mIsVideoQueueFinished(false)
  , mNeedToStopPrerollingAudio(false)
  , mNeedToStopPrerollingVideo(false)
{
  // Bound the seek time to be inside the media range.
  int64_t end = aDuration.ToMicroseconds();
  NS_ASSERTION(end != -1, "Should know end time by now");
  int64_t seekTime = mSeekJob.mTarget.GetTime().ToMicroseconds();
  seekTime = std::min(seekTime, end);
  seekTime = std::max(int64_t(0), seekTime);
  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
               "Can only seek in range [0,duration]");
  mSeekJob.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));

  // Samples decoded before the first post-seek discontinuity belong to the
  // pre-seek position and must be dropped, for each stream that exists.
  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();

  // Configure MediaDecoderReaderWrapper.
  SetMediaDecoderReaderWrapperCallback();
}
// Decodes the first sample of each sub-reader's stream to discover the
// earliest presentation timestamp (used for duration calculation). Writes
// that timestamp into aOutStartTime when found and returns the first video
// frame (or nullptr).
VideoData* nsDASHReader::FindStartTime(int64_t& aOutStartTime)
{
  NS_ASSERTION(mDecoder->OnStateMachineThread() || mDecoder->OnDecodeThread(),
               "Should be on state machine or decode thread.");

  int64_t earliestVideo = INT64_MAX;
  int64_t earliestAudio = INT64_MAX;
  VideoData* firstVideo = nullptr;

  // Take the decoder monitor unless we're already on the decode thread.
  ReentrantMonitorConditionallyEnter mon(!mDecoder->OnDecodeThread(),
                                         mDecoder->GetReentrantMonitor());

  if (HasVideo()) {
    // Forward to video reader.
    firstVideo = mVideoReader->DecodeToFirstVideoData();
    if (firstVideo) {
      earliestVideo = firstVideo->mTime;
    }
  }
  if (HasAudio()) {
    // Forward to audio reader.
    AudioData* firstAudio = mAudioReader->DecodeToFirstAudioData();
    if (firstAudio) {
      earliestAudio = firstAudio->mTime;
    }
  }

  // Only report a start time if at least one stream produced a sample.
  const int64_t earliest = NS_MIN(earliestVideo, earliestAudio);
  if (earliest != INT64_MAX) {
    aOutStartTime = earliest;
  }

  return firstVideo;
}
void HTMLVideoElement::UpdateScreenWakeLock() { bool hidden = OwnerDoc()->Hidden(); if (mScreenWakeLock && (mPaused || hidden)) { ErrorResult rv; mScreenWakeLock->Unlock(rv); NS_WARN_IF_FALSE(!rv.Failed(), "Failed to unlock the wakelock."); mScreenWakeLock = nullptr; return; } if (!mScreenWakeLock && !mPaused && !hidden && HasVideo()) { nsRefPtr<power::PowerManagerService> pmService = power::PowerManagerService::GetInstance(); NS_ENSURE_TRUE_VOID(pmService); ErrorResult rv; mScreenWakeLock = pmService->NewWakeLock(NS_LITERAL_STRING("screen"), OwnerDoc()->GetInnerWindow(), rv); } }
// Reads stream metadata. On first call: initializes the demuxer, records
// which tracks are active, captures crypto state, and marks the demuxer
// initialized. On later calls with a ready platform, returns the cached
// info. Also configures track callbacks, reports duration to the decoder,
// and sets up decoders unless waiting on media resources or a CDM.
nsresult MP4Reader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  if (!mDemuxerInitialized) {
    MonitorAutoLock mon(mDemuxerMonitor);
    // InvokeAndRetry re-runs InitDemuxer as needed while holding the monitor.
    bool ok = InvokeAndRetry(this, &MP4Reader::InitDemuxer, mStream,
                             &mDemuxerMonitor);
    NS_ENSURE_TRUE(ok, NS_ERROR_FAILURE);
    mIndexReady = true;

    // To decode, we need valid video and a place to put it.
    mInfo.mVideo.mHasVideo = mVideo.mActive = mDemuxer->HasValidVideo() &&
                                              mDecoder->GetImageContainer();
    if (mVideo.mActive) {
      mVideo.mTrackDemuxer = new MP4VideoDemuxer(mDemuxer);
    }
    mInfo.mAudio.mHasAudio = mAudio.mActive = mDemuxer->HasValidAudio();
    if (mAudio.mActive) {
      mAudio.mTrackDemuxer = new MP4AudioDemuxer(mDemuxer);
    }
    mCrypto = mDemuxer->Crypto();
    {
      // Release the demuxer monitor before taking the decoder's reentrant
      // monitor to avoid holding both at once.
      MonitorAutoUnlock unlock(mDemuxerMonitor);
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mInfo.mCrypto.mIsEncrypted = mIsEncrypted = mCrypto.valid;
    }

    // Remember that we've initialized the demuxer, so that if we're decoding
    // an encrypted stream and we need to wait for a CDM to be set, we don't
    // need to reinit the demuxer.
    mDemuxerInitialized = true;
  } else if (mPlatform && !IsWaitingMediaResources()) {
    // Fast path: everything already initialized, hand back cached info.
    *aInfo = mInfo;
    *aTags = nullptr;
    return NS_OK;
  }

  if (HasAudio()) {
    const AudioDecoderConfig& audio = mDemuxer->AudioConfig();
    mInfo.mAudio.mRate = audio.samples_per_second;
    mInfo.mAudio.mChannels = audio.channel_count;
    mAudio.mCallback = new DecoderCallback(this, kAudio);
  }

  if (HasVideo()) {
    const VideoDecoderConfig& video = mDemuxer->VideoConfig();
    mInfo.mVideo.mDisplay =
      nsIntSize(video.display_width, video.display_height);
    mVideo.mCallback = new DecoderCallback(this, kVideo);

    // Collect telemetry from h264 AVCC SPS.
    if (!mFoundSPSForTelemetry) {
      mFoundSPSForTelemetry = AccumulateSPSTelemetry(video.extra_data);
    }
  }

  if (mIsEncrypted) {
    nsTArray<uint8_t> initData;
    ExtractCryptoInitData(initData);
    if (initData.Length() == 0) {
      return NS_ERROR_FAILURE;
    }
    mInfo.mCrypto.mInitData = initData;
    mInfo.mCrypto.mType = NS_LITERAL_STRING("cenc");
  }

  // Get the duration, and report it to the decoder if we have it.
  Microseconds duration;
  {
    MonitorAutoLock lock(mDemuxerMonitor);
    duration = mDemuxer->Duration();
  }
  if (duration != -1) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(duration);
  }

  *aInfo = mInfo;
  *aTags = nullptr;

  // Decoder setup is deferred while waiting on media resources or a CDM.
  if (!IsWaitingMediaResources() && !IsWaitingOnCDMResource()) {
    NS_ENSURE_TRUE(EnsureDecodersSetup(), NS_ERROR_FAILURE);
  }

  MonitorAutoLock mon(mDemuxerMonitor);
  UpdateIndex();
  return NS_OK;
}
// True when playback is active and the current item carries a video stream.
bool CApplicationPlayer::IsPlayingVideo() const
{
  if (!IsPlaying())
    return false;

  return HasVideo();
}
// Reads stream metadata. On first call: initializes the demuxer, records
// which tracks are active, captures crypto state, and marks the demuxer
// initialized. Also fills in per-track info/callbacks, attaches cenc init
// data when the stream is encrypted, reports duration to the decoder, and
// sets up decoders unless waiting on a CDM.
nsresult MP4Reader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  if (!mDemuxerInitialized) {
    MonitorAutoLock mon(mDemuxerMonitor);
    // InvokeAndRetry re-runs InitDemuxer as needed while holding the monitor.
    bool ok = InvokeAndRetry(this, &MP4Reader::InitDemuxer, mStream,
                             &mDemuxerMonitor);
    NS_ENSURE_TRUE(ok, NS_ERROR_FAILURE);
    mIndexReady = true;

    // To decode, we need valid video and a place to put it.
    mVideo.mActive = mDemuxer->HasValidVideo() &&
                     mDecoder->GetImageContainer();
    if (mVideo.mActive) {
      mVideo.mTrackDemuxer = new MP4VideoDemuxer(mDemuxer);
    }
    mAudio.mActive = mDemuxer->HasValidAudio();
    if (mAudio.mActive) {
      mAudio.mTrackDemuxer = new MP4AudioDemuxer(mDemuxer);
    }
    mCrypto = mDemuxer->Crypto();
    {
      // Release the demuxer monitor before taking the decoder's reentrant
      // monitor to avoid holding both at once.
      MonitorAutoUnlock unlock(mDemuxerMonitor);
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mIsEncrypted = mCrypto.valid;
    }

    // Remember that we've initialized the demuxer, so that if we're decoding
    // an encrypted stream and we need to wait for a CDM to be set, we don't
    // need to reinit the demuxer.
    mDemuxerInitialized = true;
  } else if (mPlatform && !IsWaitingMediaResources()) {
    // NOTE(review): unlike other versions of this function, this branch does
    // not return early — execution falls through to the track setup below.
    // Confirm that is intentional.
    *aInfo = mInfo;
    *aTags = nullptr;
  }

  if (HasAudio()) {
    mInfo.mAudio = mDemuxer->AudioConfig();
    mAudio.mCallback = new DecoderCallback(this, TrackInfo::kAudioTrack);
  }

  if (HasVideo()) {
    mInfo.mVideo = mDemuxer->VideoConfig();
    mVideo.mCallback = new DecoderCallback(this, TrackInfo::kVideoTrack);

    // Collect telemetry from h264 AVCC SPS.
    if (!mFoundSPSForTelemetry) {
      mFoundSPSForTelemetry = AccumulateSPSTelemetry(mInfo.mVideo.mExtraData);
    }
  }

  if (mCrypto.valid) {
    nsTArray<uint8_t> initData;
    ExtractCryptoInitData(initData);
    if (initData.Length() == 0) {
      return NS_ERROR_FAILURE;
    }
    // Add init data to info, will get sent from HTMLMediaElement::MetadataLoaded
    // (i.e., when transitioning from HAVE_NOTHING to HAVE_METADATA).
    mInfo.mCrypto.AddInitData(NS_LITERAL_STRING("cenc"), Move(initData));
  }

  // Get the duration, and report it to the decoder if we have it.
  Microseconds duration;
  {
    MonitorAutoLock lock(mDemuxerMonitor);
    duration = mDemuxer->Duration();
  }
  if (duration != -1) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(duration);
  }

  *aInfo = mInfo;
  *aTags = nullptr;

  // Decoder setup is deferred while waiting on a CDM.
  if (!IsWaitingOnCDMResource()) {
    NS_ENSURE_TRUE(EnsureDecodersSetup(), NS_ERROR_FAILURE);
  }

  MonitorAutoLock mon(mDemuxerMonitor);
  UpdateIndex();
  return NS_OK;
}
// Video decoding is ongoing while a video track exists whose sample queue
// has not yet been exhausted.
bool SeekTask::IsVideoDecoding() const
{
  AssertOwnerThread();
  if (!HasVideo()) {
    return false;
  }
  return !mIsVideoQueueFinished;
}
// Returns the raw byte buffer backing this object.
// NOTE(review): HasVideo(true) appears to be a side-effecting call made from
// a const accessor before handing out the pointer — confirm this flagging is
// intentional and that HasVideo(bool) is the setter overload.
virtual unsigned char* GetDataPtr() const { HasVideo(true); return pBytes_; }
// Decodes forward from the current position to the seek target aTarget
// (microseconds). Video frames ending at or before the target are discarded
// (keeping the last one at EOF so something can be displayed); the audio
// block containing the target is trimmed so playback resumes exactly at the
// target. Returns NS_ERROR_FAILURE on shutdown or arithmetic overflow.
nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
  DECODER_LOG(PR_LOG_DEBUG,
              ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));

  // Decode forward to the target frame. Start with video, if we have it.
  if (HasVideo()) {
    bool eof = false;
    int64_t startTime = -1;
    // Holds the most recently popped frame so it can be re-pushed at EOF.
    nsAutoPtr<VideoData> video;
    while (HasVideo() && !eof) {
      // Refill the queue until we have at least one frame or hit EOF.
      while (VideoQueue().GetSize() == 0 && !eof) {
        bool skip = false;
        eof = !DecodeVideoFrame(skip, 0);
        {
          // Bail out promptly if the decoder is shutting down.
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      if (VideoQueue().GetSize() == 0) {
        // Hit end of file, we want to display the last frame of the video.
        if (video) {
          VideoQueue().PushFront(video.forget());
        }
        break;
      }
      video = VideoQueue().PeekFront();
      // If the frame end time is less than the seek target, we won't want
      // to display this frame after the seek, so discard it.
      if (video && video->GetEndTime() <= aTarget) {
        if (startTime == -1) {
          startTime = video->mTime;
        }
        VideoQueue().PopFront();
      } else {
        // This frame spans the target; release our temporary ownership so
        // it stays in the queue, and stop.
        video.forget();
        break;
      }
    }
    {
      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        return NS_ERROR_FAILURE;
      }
    }
    DECODER_LOG(PR_LOG_DEBUG,
                ("First video frame after decode is %lld", startTime));
  }

  if (HasAudio()) {
    // Decode audio forward to the seek target.
    bool eof = false;
    while (HasAudio() && !eof) {
      // Refill the queue until we have at least one block or hit EOF.
      while (!eof && AudioQueue().GetSize() == 0) {
        eof = !DecodeAudioData();
        {
          // Bail out promptly if the decoder is shutting down.
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      const AudioData* audio = AudioQueue().PeekFront();
      if (!audio)
        break;
      // Compare positions in frame units; CheckedInt64 guards overflow.
      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
      if (!startFrame.isValid() || !targetFrame.isValid()) {
        return NS_ERROR_FAILURE;
      }
      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
        // Our seek target lies after the frames in this AudioData. Pop it
        // off the queue, and keep decoding forwards.
        delete AudioQueue().PopFront();
        audio = nullptr;
        continue;
      }
      if (startFrame.value() > targetFrame.value()) {
        // The seek target doesn't lie in the audio block just after the last
        // audio frames we've seen which were before the seek target. This
        // could have been the first audio data we've seen after seek, i.e. the
        // seek terminated after the seek target in the audio stream. Just
        // abort the audio decode-to-target, the state machine will play
        // silence to cover the gap. Typically this happens in poorly muxed
        // files.
        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
        break;
      }
      // The seek target lies somewhere in this AudioData's frames, strip off
      // any frames which lie before the seek target, so we'll begin playback
      // exactly at the seek target.
      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
                   "Target must at or be after data start.");
      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
                   "Data must end after target.");
      int64_t framesToPrune = targetFrame.value() - startFrame.value();
      if (framesToPrune > audio->mFrames) {
        // We've messed up somehow. Don't try to trim frames, the |frames|
        // variable below will overflow.
        NS_WARNING("Can't prune more frames that we have!");
        break;
      }
      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
      uint32_t channels = audio->mChannels;
      // Copy the tail of the block (frames at/after the target) into a new
      // buffer; samples are interleaved, hence the *channels scaling.
      nsAutoArrayPtr<AudioDataValue> audioData(
        new AudioDataValue[frames * channels]);
      memcpy(audioData.get(),
             audio->mAudioData.get() + (framesToPrune * channels),
             frames * channels * sizeof(AudioDataValue));
      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
      if (!duration.isValid()) {
        return NS_ERROR_FAILURE;
      }
      // Replace the front block with the trimmed one starting at aTarget.
      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                              aTarget,
                                              duration.value(),
                                              frames,
                                              audioData.forget(),
                                              channels));
      delete AudioQueue().PopFront();
      AudioQueue().PushFront(data.forget());
      break;
    }
  }

  DECODER_LOG(PR_LOG_DEBUG,
              ("MediaDecoderReader::DecodeToTarget(%lld) End", aTarget));

  return NS_OK;
}
// Lazily creates the platform decoder module — wrapped around a CDM proxy
// for encrypted streams when EME is compiled in — and instantiates the
// audio/video decoders for the active tracks. Returns false on any failure;
// returns true early while still waiting on media resources.
bool MP4Reader::EnsureDecodersSetup()
{
  if (mAreDecodersSetup) {
    // Setup already ran; report whether it produced a usable platform.
    return !!mPlatform;
  }

  if (mIsEncrypted) {
#ifdef MOZ_EME
    // We have encrypted audio or video. We'll need a CDM to decrypt and
    // possibly decode this. Wait until we've received a CDM from the
    // JavaScript player app. Note: we still go through the motions here
    // even if EME is disabled, so that if script tries and fails to create
    // a CDM, we can detect that and notify chrome and show some UI explaining
    // that we failed due to EME being disabled.
    nsRefPtr<CDMProxy> proxy;
    if (IsWaitingMediaResources()) {
      return true;
    }
    MOZ_ASSERT(!IsWaitingMediaResources());
    {
      // The CDM proxy is shared state; read it under the decoder monitor.
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      proxy = mDecoder->GetCDMProxy();
    }
    MOZ_ASSERT(proxy);

    mPlatform = PlatformDecoderModule::CreateCDMWrapper(proxy,
                                                        HasAudio(),
                                                        HasVideo());
    NS_ENSURE_TRUE(mPlatform, false);
#else
    // EME not supported.
    return false;
#endif
  } else {
    mPlatform = PlatformDecoderModule::Create();
    NS_ENSURE_TRUE(mPlatform, false);
  }

  if (HasAudio()) {
    NS_ENSURE_TRUE(IsSupportedAudioMimeType(mDemuxer->AudioConfig().mime_type),
                   false);
    mAudio.mDecoder = mPlatform->CreateAudioDecoder(mDemuxer->AudioConfig(),
                                                    mAudio.mTaskQueue,
                                                    mAudio.mCallback);
    NS_ENSURE_TRUE(mAudio.mDecoder != nullptr, false);
    nsresult rv = mAudio.mDecoder->Init();
    NS_ENSURE_SUCCESS(rv, false);
  }

  if (HasVideo()) {
    NS_ENSURE_TRUE(IsSupportedVideoMimeType(mDemuxer->VideoConfig().mime_type),
                   false);
    if (mSharedDecoderManager &&
        mPlatform->SupportsSharedDecoders(mDemuxer->VideoConfig())) {
      // Platform supports decoder pooling; reuse a shared instance.
      mVideo.mDecoder =
        mSharedDecoderManager->CreateVideoDecoder(mPlatform,
                                                  mDemuxer->VideoConfig(),
                                                  mLayersBackendType,
                                                  mDecoder->GetImageContainer(),
                                                  mVideo.mTaskQueue,
                                                  mVideo.mCallback);
    } else {
      mVideo.mDecoder =
        mPlatform->CreateVideoDecoder(mDemuxer->VideoConfig(),
                                      mLayersBackendType,
                                      mDecoder->GetImageContainer(),
                                      mVideo.mTaskQueue,
                                      mVideo.mCallback);
    }
    NS_ENSURE_TRUE(mVideo.mDecoder != nullptr, false);
    nsresult rv = mVideo.mDecoder->Init();
    NS_ENSURE_SUCCESS(rv, false);
  }

  mAreDecodersSetup = true;
  return true;
}
bool HTMLVideoElement::ShouldCreateVideoWakeLock() const { // Make sure we only request wake lock for video with audio track, because // video without audio track is often used as background image which seems no // need to hold a wakelock. return HasVideo() && HasAudio(); }