void
H264Converter::DecodeFirstSample(MediaRawData* aSample)
{
  // Drop samples until a keyframe arrives.
  if (mNeedKeyframe && !aSample->mKeyframe) {
    mDecodePromise.Resolve(DecodedData(), __func__);
    return;
  }

  mNeedAVCC =
    Some(mDecoder->NeedsConversion() == ConversionRequired::kNeedAVCC);

  // Convert to Annex B when the wrapped decoder can't consume AVCC directly.
  if (!*mNeedAVCC &&
      !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
    mDecodePromise.Reject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
    return;
  }

  mNeedKeyframe = false;

  // Capture |self| so the converter stays alive until the decode settles.
  RefPtr<H264Converter> self = this;
  mDecoder->Decode(aSample)
    ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
           [self, this](const MediaDataDecoder::DecodedData& aResults) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Resolve(aResults, __func__);
           },
           [self, this](const MediaResult& aError) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Reject(aError, __func__);
           })
    ->Track(mDecodePromiseRequest);
}
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::Drain()
{
  return InvokeAsync(mTaskQueue, __func__, [] {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  });
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleATDecoder::Drain()
{
  LOG("Draining AudioToolbox AAC decoder");
  RefPtr<AppleATDecoder> self = this;
  return InvokeAsync(mTaskQueue, __func__, [] {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  });
}
RefPtr<MediaDataDecoder::DecodePromise>
H264Converter::Drain()
{
  mNeedKeyframe = true;
  if (mDecoder) {
    return mDecoder->Drain();
  }
  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
H264Converter::Drain()
{
  MOZ_RELEASE_ASSERT(!mDrainRequest.Exists());
  mNeedKeyframe = true;
  if (mDecoder) {
    return mDecoder->Drain();
  }
  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
GMPVideoDecoder::Drain()
{
  MOZ_ASSERT(IsOnGMPThread());
  MOZ_ASSERT(mDecodePromise.IsEmpty(), "Must wait for decoding to complete");

  RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
  // If the plugin is gone or the drain request fails, complete immediately
  // with no data.
  if (!mGMP || NS_FAILED(mGMP->Drain())) {
    mDrainPromise.Resolve(DecodedData(), __func__);
  }

  return p;
}
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  ogg_packet pkt = InitVorbisPacket(
    aData, aLength, false, aSample->mEOS,
    aSample->mTimecode.ToMicroseconds(), mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis:%d", err)),
      __func__);
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
      __func__);
  }

  VorbisPCMValue** pcm = nullptr;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  DecodedData results;
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames * channels);
    if (!buffer) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    }
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i * channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    auto duration = FramesToTimeUnit(frames, rate);
    if (!duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio duration")),
        __func__);
    }
    auto total_duration = FramesToTimeUnit(mFrames, rate);
    if (!total_duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio total_duration")),
        __func__);
    }

    auto time = total_duration + aSample->mTime;
    if (!time.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Overflow adding total_duration and aSample->mTime")),
        __func__);
    }

    if (!mAudioConverter) {
      AudioConfig in(
        AudioConfig::ChannelLayout(channels, VorbisLayout(channels)), rate);
      AudioConfig out(channels, rate);
      if (!in.IsValid() || !out.IsValid()) {
        return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                      RESULT_DETAIL("Invalid channel layout:%u", channels)),
          __func__);
      }
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(Move(buffer));
    data = mAudioConverter->Process(Move(data));

    results.AppendElement(new AudioData(aOffset, time, duration, frames,
                                        data.Forget(), channels, rate));
    mFrames += frames;
    err = vorbis_synthesis_read(&mVorbisDsp, frames);
    if (err) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
        __func__);
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }

  return DecodePromise::CreateAndResolve(Move(results), __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
H264Converter::Decode(MediaRawData* aSample)
{
  MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(),
                     "Flush operation didn't complete");
  MOZ_RELEASE_ASSERT(
    !mDecodePromiseRequest.Exists() && !mInitPromiseRequest.Exists(),
    "Can't request a new decode until previous one completed");

  if (!AnnexB::ConvertSampleToAVCC(aSample)) {
    // We need AVCC content to be able to later parse the SPS.
    // This is a no-op if the data is already AVCC.
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("ConvertSampleToAVCC")),
      __func__);
  }

  if (!AnnexB::IsAVCC(aSample)) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Invalid H264 content")),
      __func__);
  }

  MediaResult rv(NS_OK);
  if (!mDecoder) {
    // It is not possible to create an AVCC H264 decoder without SPS.
    // As such, creation will fail if the extra_data just extracted doesn't
    // contain a SPS.
    rv = CreateDecoderAndInit(aSample);
    if (rv == NS_ERROR_NOT_INITIALIZED) {
      // We are missing the required SPS to create the decoder.
      // Ignore for the time being, the MediaRawData will be dropped.
      return DecodePromise::CreateAndResolve(DecodedData(), __func__);
    }
  } else {
    // Initialize the members that we couldn't if the extradata was given
    // during H264Converter's construction.
    if (!mNeedAVCC) {
      mNeedAVCC =
        Some(mDecoder->NeedsConversion() == ConversionRequired::kNeedAVCC);
    }
    if (!mCanRecycleDecoder) {
      mCanRecycleDecoder = Some(CanRecycleDecoder());
    }
    rv = CheckForSPSChange(aSample);
  }

  if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
    // The decoder is pending initialization.
    RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
    return p;
  }

  if (NS_FAILED(rv)) {
    return DecodePromise::CreateAndReject(rv, __func__);
  }

  if (mNeedKeyframe && !aSample->mKeyframe) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  auto res = !*mNeedAVCC
               ? AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)
               : Ok();
  if (res.isErr()) {
    return DecodePromise::CreateAndReject(
      MediaResult(res.unwrapErr(), RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
  }

  mNeedKeyframe = false;

  aSample->mExtraData = mCurrentConfig.mExtraData;

  return mDecoder->Decode(aSample);
}
RefPtr<MediaDataDecoder::DecodePromise>
H264Converter::Decode(MediaRawData* aSample)
{
  MOZ_RELEASE_ASSERT(
    !mDecodePromiseRequest.Exists() && !mInitPromiseRequest.Exists(),
    "Can't request a new decode until previous one completed");

  if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
    // We need AVCC content to be able to later parse the SPS.
    // This is a no-op if the data is already AVCC.
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("ConvertSampleToAVCC")),
      __func__);
  }

  nsresult rv;
  if (!mDecoder) {
    // It is not possible to create an AVCC H264 decoder without SPS.
    // As such, creation will fail if the extra_data just extracted doesn't
    // contain a SPS.
    rv = CreateDecoderAndInit(aSample);
    if (rv == NS_ERROR_NOT_INITIALIZED) {
      // We are missing the required SPS to create the decoder.
      // Ignore for the time being, the MediaRawData will be dropped.
      return DecodePromise::CreateAndResolve(DecodedData(), __func__);
    }
  } else {
    rv = CheckForSPSChange(aSample);
  }

  if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
    // The decoder is pending initialization.
    RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
    return p;
  }

  if (NS_FAILED(rv)) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Unable to create H264 decoder")),
      __func__);
  }

  if (mNeedKeyframe && !aSample->mKeyframe) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  if (!mNeedAVCC) {
    mNeedAVCC =
      Some(mDecoder->NeedsConversion() == ConversionRequired::kNeedAVCC);
  }

  if (!*mNeedAVCC &&
      !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
  }

  mNeedKeyframe = false;
  aSample->mExtraData = mCurrentConfig.mExtraData;
  return mDecoder->Decode(aSample);
}