void MediaDecoderReader::RequestAudioData()
{
  while (AudioQueue().GetSize() == 0 && !AudioQueue().IsFinished()) {
    if (!DecodeAudioData()) {
      AudioQueue().Finish();
      break;
    }
    // AudioQueue size is still zero, post a task to try again. Don't spin
    // waiting in this while loop since it somehow prevents audio EOS from
    // coming in gstreamer 1.x when there is still video buffer waiting to be
    // consumed. (|mVideoSinkBufferCount| > 0)
    if (AudioQueue().GetSize() == 0 && mTaskQueue) {
      RefPtr<nsIRunnable> task(
        NS_NewRunnableMethod(this, &MediaDecoderReader::RequestAudioData));
      mTaskQueue->Dispatch(task.forget());
      return;
    }
  }
  if (AudioQueue().GetSize() > 0) {
    nsRefPtr<AudioData> a = AudioQueue().PopFront();
    if (mAudioDiscontinuity) {
      a->mDiscontinuity = true;
      mAudioDiscontinuity = false;
    }
    GetCallback()->OnAudioDecoded(a);
    return;
  } else if (AudioQueue().IsFinished()) {
    GetCallback()->OnNotDecoded(MediaData::AUDIO_DATA, END_OF_STREAM);
    return;
  }
}
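// Context note (not from this file): the pre-MozPromise version above reports
// results through a decode callback rather than a promise. The interface is
// only visible here through the GetCallback()->OnAudioDecoded(...) and
// GetCallback()->OnNotDecoded(...) calls, so the sketch below is an assumed
// shape based on those calls, not the exact Gecko declaration.
class RequestSampleCallback
{
public:
  // Assumed reason enum; the snippets in this file use END_OF_STREAM and
  // DECODE_ERROR.
  enum NotDecodedReason { END_OF_STREAM, DECODE_ERROR };

  // Called with the AudioData sample that was popped off AudioQueue().
  virtual void OnAudioDecoded(AudioData* aSample) = 0;

  // Called when no sample could be produced; the reason distinguishes a
  // normal end of stream from a decode error.
  virtual void OnNotDecoded(MediaData::Type aType, NotDecodedReason aReason) = 0;

protected:
  virtual ~RequestSampleCallback() {}
};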
RefPtr<MediaDecoderReader::MediaDataPromise>
MediaDecoderReader::RequestAudioData()
{
  RefPtr<MediaDataPromise> p = mBaseAudioPromise.Ensure(__func__);
  while (AudioQueue().GetSize() == 0 && !AudioQueue().IsFinished()) {
    if (!DecodeAudioData()) {
      AudioQueue().Finish();
      break;
    }
    // AudioQueue size is still zero, post a task to try again. Don't spin
    // waiting in this while loop since it somehow prevents audio EOS from
    // coming in gstreamer 1.x when there is still video buffer waiting to be
    // consumed. (|mVideoSinkBufferCount| > 0)
    if (AudioQueue().GetSize() == 0) {
      RefPtr<nsIRunnable> task(new ReRequestAudioTask(this));
      mTaskQueue->Dispatch(task.forget());
      return p;
    }
  }
  if (AudioQueue().GetSize() > 0) {
    RefPtr<AudioData> a = AudioQueue().PopFront();
    mBaseAudioPromise.Resolve(a, __func__);
  } else if (AudioQueue().IsFinished()) {
    mBaseAudioPromise.Reject(mHitAudioDecodeError
                               ? NS_ERROR_DOM_MEDIA_FATAL_ERR
                               : NS_ERROR_DOM_MEDIA_END_OF_STREAM,
                             __func__);
    mHitAudioDecodeError = false;
  } else {
    MOZ_ASSERT(false, "Dropping this promise on the floor");
  }
  return p;
}
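// Context note (not from this file): a state-machine-side caller would
// typically consume the returned promise with MozPromise::Then(). Minimal
// sketch only, assuming MediaDataPromise resolves with a RefPtr<MediaData>
// and rejects with a MediaResult; HypotheticalCaller, OnAudioDecoded,
// OnAudioNotDecoded, mReader and mOwnerThread are all assumed names.
void HypotheticalCaller::RequestAudio()
{
  mReader->RequestAudioData()
    ->Then(mOwnerThread, __func__,
           [this](RefPtr<MediaData> aAudio) {
             // Resolved: a decoded audio sample is available.
             OnAudioDecoded(aAudio);
           },
           [this](const MediaResult& aError) {
             // Rejected: NS_ERROR_DOM_MEDIA_END_OF_STREAM signals normal EOS;
             // anything else (e.g. NS_ERROR_DOM_MEDIA_FATAL_ERR) is an error.
             OnAudioNotDecoded(aError);
           });
}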
nsRefPtr<MediaDecoderReader::AudioDataPromise>
MediaDecoderReader::RequestAudioData()
{
  nsRefPtr<AudioDataPromise> p = mBaseAudioPromise.Ensure(__func__);
  while (AudioQueue().GetSize() == 0 && !AudioQueue().IsFinished()) {
    if (!DecodeAudioData()) {
      AudioQueue().Finish();
      break;
    }
    // AudioQueue size is still zero, post a task to try again. Don't spin
    // waiting in this while loop since it somehow prevents audio EOS from
    // coming in gstreamer 1.x when there is still video buffer waiting to be
    // consumed. (|mVideoSinkBufferCount| > 0)
    if (AudioQueue().GetSize() == 0 && mTaskQueue) {
      RefPtr<nsIRunnable> task(new ReRequestAudioTask(this));
      mTaskQueue->Dispatch(task);
      return p;
    }
  }
  if (AudioQueue().GetSize() > 0) {
    nsRefPtr<AudioData> a = AudioQueue().PopFront();
    if (mAudioDiscontinuity) {
      a->mDiscontinuity = true;
      mAudioDiscontinuity = false;
    }
    mBaseAudioPromise.Resolve(a, __func__);
  } else if (AudioQueue().IsFinished()) {
    mBaseAudioPromise.Reject(mHitAudioDecodeError ? DECODE_ERROR : END_OF_STREAM,
                             __func__);
    mHitAudioDecodeError = false;
  } else {
    MOZ_ASSERT(false, "Dropping this promise on the floor");
  }
  return p;
}
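// Context note (not from this file): ReRequestAudioTask is referenced by the
// two promise-based versions above but not shown. A plausible definition is a
// small runnable that re-enters RequestAudioData() on the reader's task
// queue; this is a sketch of the assumed shape in the nsRunnable-era style
// used above, not the verbatim Gecko class.
class ReRequestAudioTask : public nsRunnable
{
public:
  explicit ReRequestAudioTask(MediaDecoderReader* aReader)
    : mReader(aReader)
  {
  }

  NS_IMETHOD Run() override
  {
    // Only retry if the pending audio promise is still outstanding (i.e. it
    // hasn't been rejected by a reset in the meantime). Ensure(__func__) in
    // RequestAudioData() then hands back the same pending promise.
    if (!mReader->mBaseAudioPromise.IsEmpty()) {
      mReader->RequestAudioData();
    }
    return NS_OK;
  }

private:
  nsRefPtr<MediaDecoderReader> mReader;
};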
HX_RESULT CPCMAudioFormat::DecodeAudioData(HXAudioData& audioData,
                                           HXBOOL bFlushCodec)
{
    return DecodeAudioData(audioData, bFlushCodec, CAudioFormat::GetAudioPacket());
}
AudioData* MediaDecoderReader::DecodeToFirstAudioData()
{
  bool eof = false;
  while (!eof && AudioQueue().GetSize() == 0) {
    {
      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        return nullptr;
      }
    }
    eof = !DecodeAudioData();
  }
  AudioData* d = nullptr;
  return (d = AudioQueue().PeekFront()) ? d : nullptr;
}
nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
  DECODER_LOG(PR_LOG_DEBUG,
              ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));

  // Decode forward to the target frame. Start with video, if we have it.
  if (HasVideo()) {
    bool eof = false;
    int64_t startTime = -1;
    nsAutoPtr<VideoData> video;
    while (HasVideo() && !eof) {
      while (VideoQueue().GetSize() == 0 && !eof) {
        bool skip = false;
        eof = !DecodeVideoFrame(skip, 0);
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      if (VideoQueue().GetSize() == 0) {
        // Hit end of file, we want to display the last frame of the video.
        if (video) {
          VideoQueue().PushFront(video.forget());
        }
        break;
      }
      video = VideoQueue().PeekFront();
      // If the frame end time is less than the seek target, we won't want
      // to display this frame after the seek, so discard it.
      if (video && video->GetEndTime() <= aTarget) {
        if (startTime == -1) {
          startTime = video->mTime;
        }
        VideoQueue().PopFront();
      } else {
        video.forget();
        break;
      }
    }
    {
      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        return NS_ERROR_FAILURE;
      }
    }
    DECODER_LOG(PR_LOG_DEBUG,
                ("First video frame after decode is %lld", startTime));
  }

  if (HasAudio()) {
    // Decode audio forward to the seek target.
    bool eof = false;
    while (HasAudio() && !eof) {
      while (!eof && AudioQueue().GetSize() == 0) {
        eof = !DecodeAudioData();
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      const AudioData* audio = AudioQueue().PeekFront();
      if (!audio) {
        break;
      }
      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
      if (!startFrame.isValid() || !targetFrame.isValid()) {
        return NS_ERROR_FAILURE;
      }
      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
        // Our seek target lies after the frames in this AudioData. Pop it
        // off the queue, and keep decoding forwards.
        delete AudioQueue().PopFront();
        audio = nullptr;
        continue;
      }
      if (startFrame.value() > targetFrame.value()) {
        // The seek target doesn't lie in the audio block just after the last
        // audio frames we've seen which were before the seek target. This
        // could have been the first audio data we've seen after seek, i.e. the
        // seek terminated after the seek target in the audio stream. Just
        // abort the audio decode-to-target, the state machine will play
        // silence to cover the gap. Typically this happens in poorly muxed
        // files.
        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
        break;
      }

      // The seek target lies somewhere in this AudioData's frames, strip off
      // any frames which lie before the seek target, so we'll begin playback
      // exactly at the seek target.
      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
                   "Target must be at or after data start.");
      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
                   "Data must end after target.");

      int64_t framesToPrune = targetFrame.value() - startFrame.value();
      if (framesToPrune > audio->mFrames) {
        // We've messed up somehow. Don't try to trim frames, the |frames|
        // variable below will overflow.
        NS_WARNING("Can't prune more frames than we have!");
        break;
      }
      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
      uint32_t channels = audio->mChannels;
      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
      memcpy(audioData.get(),
             audio->mAudioData.get() + (framesToPrune * channels),
             frames * channels * sizeof(AudioDataValue));
      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
      if (!duration.isValid()) {
        return NS_ERROR_FAILURE;
      }
      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                              aTarget,
                                              duration.value(),
                                              frames,
                                              audioData.forget(),
                                              channels));
      delete AudioQueue().PopFront();
      AudioQueue().PushFront(data.forget());
      break;
    }
  }

  DECODER_LOG(PR_LOG_DEBUG,
              ("MediaDecoderReader::DecodeToTarget(%lld) End", aTarget));

  return NS_OK;
}
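// Context note (not from this file): DecodeToTarget() above relies on
// UsecsToFrames()/FramesToUsecs() to convert between microseconds and sample
// frames with overflow checking. This is a sketch of the assumed shape of
// those helpers (the real definitions live elsewhere, e.g. VideoUtils, and
// may differ slightly):
static const int64_t USECS_PER_S = 1000000;

// frames = usecs * rate / 1e6; CheckedInt64 tracks overflow so callers can
// bail out via isValid(), as DecodeToTarget() does above.
CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate)
{
  return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
}

// usecs = frames * 1e6 / rate.
CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate)
{
  return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
}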