/**
 * Decode a single AAC access unit and hand the resulting PCM to mCallback.
 *
 * Exactly one FFmpeg packet per MP4Sample is expected; a partial consume
 * only fires an NS_ASSERTION. All failures are reported through
 * mCallback->Error().
 */
void
FFmpegAACDecoder<LIBAV_VER>::DecodePacket(MP4Sample* aSample)
{
  // FFmpeg requires input buffers to carry this much trailing padding.
  aSample->Pad(FF_INPUT_BUFFER_PADDING_SIZE);

  AVPacket packet;
  av_init_packet(&packet);
  packet.data = aSample->data;
  packet.size = aSample->size;
  packet.pos = aSample->byte_offset;

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int gotFrame;
  int consumed = avcodec_decode_audio4(mCodecContext, mFrame, &gotFrame,
                                       &packet);
  if (consumed < 0 || !gotFrame) {
    NS_WARNING("FFmpeg audio decoder error.");
    mCallback->Error();
    return;
  }
  NS_ASSERTION(consumed == (int)aSample->size,
               "Only one audio packet should be received at a time.");

  uint32_t channels = mCodecContext->channels;
  // Repack the decoder's (possibly planar) output into interleaved samples.
  nsAutoArrayPtr<AudioDataValue> audio(
    CopyAndPackAudio(mFrame, channels, mFrame->nb_samples));

  nsAutoPtr<AudioData> data(
    new AudioData(packet.pos, aSample->composition_timestamp,
                  aSample->duration, mFrame->nb_samples, audio.forget(),
                  channels));
  mCallback->Output(data.forget());

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
/**
 * Decode a single AAC access unit and hand the resulting PCM to mCallback.
 *
 * Fixes two defects in the previous revision:
 *  - the AVFrame comes from avcodec_alloc_frame(), i.e. av_malloc(); it must
 *    be released with av_freep(), not with the `delete` that nsAutoPtr would
 *    invoke (mismatched allocator/deallocator is undefined behavior);
 *  - the allocation was never checked for failure before being dereferenced
 *    by avcodec_get_frame_defaults().
 *
 * All failures are reported through mCallback->Error().
 */
void
FFmpegAACDecoder::DecodePacket(MP4Sample* aSample)
{
  AVFrame* frame = avcodec_alloc_frame();
  if (!frame) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }
  avcodec_get_frame_defaults(frame);

  AVPacket packet;
  av_init_packet(&packet);
  packet.data = aSample->data;
  packet.size = aSample->size;
  packet.pos = aSample->byte_offset;

  int decoded;
  int bytesConsumed = avcodec_decode_audio4(&mCodecContext, frame, &decoded,
                                            &packet);
  if (bytesConsumed < 0 || !decoded) {
    NS_WARNING("FFmpeg audio decoder error.");
    av_freep(&frame);
    mCallback->Error();
    return;
  }
  NS_ASSERTION(bytesConsumed == (int)aSample->size,
               "Only one audio packet should be received at a time.");

  uint32_t numChannels = mCodecContext.channels;
  // Repack the decoder's (possibly planar) output into interleaved samples.
  nsAutoArrayPtr<AudioDataValue> audio(
    CopyAndPackAudio(frame, numChannels, frame->nb_samples));

  nsAutoPtr<AudioData> data(
    new AudioData(packet.pos, aSample->composition_timestamp,
                  aSample->duration, frame->nb_samples, audio.forget(),
                  numChannels));
  // The frame's contents have been copied; release it before handing off.
  av_freep(&frame);
  mCallback->Output(data.forget());

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
/**
 * Decode one MediaRawData worth of compressed audio.
 *
 * Drains the sample through avcodec_decode_audio4(), emitting one AudioData
 * per decoded frame via mCallback->Output(). Returns NS_OK on success or a
 * MediaResult describing the failure.
 *
 * Fix: the sample-format whitelist is now checked only inside the
 * `if (decoded)` branch. Previously it ran on every loop iteration, so a
 * call that consumed bytes without producing a frame could read a stale
 * mFrame->format and return a spurious NS_ERROR_DOM_MEDIA_DECODE_ERR.
 */
MediaResult
FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);
  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    return MediaResult(
      NS_ERROR_OUT_OF_MEMORY,
      RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  while (packet.size > 0) {
    int decoded;
    int bytesConsumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
    }

    if (decoded) {
      // Only inspect mFrame once the decoder actually produced a frame;
      // before that its fields are not meaningful.
      if (mFrame->format != AV_SAMPLE_FMT_FLT &&
          mFrame->format != AV_SAMPLE_FMT_FLTP &&
          mFrame->format != AV_SAMPLE_FMT_S16 &&
          mFrame->format != AV_SAMPLE_FMT_S16P &&
          mFrame->format != AV_SAMPLE_FMT_S32 &&
          mFrame->format != AV_SAMPLE_FMT_S32P) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_DECODE_ERR,
          RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
      }

      uint32_t numChannels = mCodecContext->channels;
      AudioConfig::ChannelLayout layout(numChannels);
      if (!layout.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_FATAL_ERR,
          RESULT_DETAIL("Unsupported channel layout:%u", numChannels));
      }

      uint32_t samplingRate = mCodecContext->sample_rate;
      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
      if (!audio) {
        return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
      }

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!duration.IsValid()) {
        return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                           RESULT_DETAIL("Invalid sample duration"));
      }

      RefPtr<AudioData> data =
        new AudioData(samplePosition, pts.ToMicroseconds(),
                      duration.ToMicroseconds(), mFrame->nb_samples,
                      Move(audio), numChannels, samplingRate);
      mCallback->Output(data);

      pts += duration;
      if (!pts.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid count of accumulated audio samples"));
      }
    }

    // Advance past the bytes the decoder consumed this iteration.
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }
  return NS_OK;
}
/**
 * Decode one MediaRawData worth of compressed audio, emitting one AudioData
 * per decoded frame via mCallback->Output(). Failures are reported through
 * mCallback->Error(); when the task queue drains, InputExhausted() is fired.
 */
void
FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
{
  AVPacket packet;
  av_init_packet(&packet);
  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int64_t offset = aSample->mOffset;
  media::TimeUnit framePts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  // A single input sample may contain several FFmpeg packets; loop until
  // every byte has been consumed.
  while (packet.size > 0) {
    int gotFrame;
    int consumed =
      avcodec_decode_audio4(mCodecContext, mFrame, &gotFrame, &packet);
    if (consumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      mCallback->Error();
      return;
    }

    if (gotFrame) {
      uint32_t channels = mCodecContext->channels;
      uint32_t rate = mCodecContext->sample_rate;
      nsAutoArrayPtr<AudioDataValue> audio(
        CopyAndPackAudio(mFrame, channels, mFrame->nb_samples));

      media::TimeUnit duration = FramesToTimeUnit(mFrame->nb_samples, rate);
      if (!duration.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }

      nsRefPtr<AudioData> data =
        new AudioData(offset, framePts.ToMicroseconds(),
                      duration.ToMicroseconds(), mFrame->nb_samples,
                      audio.forget(), channels, rate);
      mCallback->Output(data);

      framePts += duration;
      if (!framePts.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }
    }

    packet.data += consumed;
    packet.size -= consumed;
    offset += consumed;
  }

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
/**
 * Decode one MediaRawData worth of compressed audio on the task queue,
 * emitting one AudioData per decoded frame via mCallback->Output().
 * Failures are reported through mCallback->Error(); when the task queue
 * drains, InputExhausted() is fired.
 */
void
FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  AVPacket packet;
  mLib->av_init_packet(&packet);
  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int64_t offset = aSample->mOffset;
  media::TimeUnit framePts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  // A single input sample may contain several FFmpeg packets; loop until
  // every byte has been consumed.
  while (packet.size > 0) {
    int gotFrame;
    int consumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &gotFrame, &packet);
    if (consumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      mCallback->Error();
      return;
    }

    if (gotFrame) {
      uint32_t channels = mCodecContext->channels;
      AudioConfig::ChannelLayout layout(channels);
      if (!layout.IsValid()) {
        mCallback->Error();
        return;
      }

      uint32_t rate = mCodecContext->sample_rate;
      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, channels, mFrame->nb_samples);
      media::TimeUnit duration = FramesToTimeUnit(mFrame->nb_samples, rate);
      if (!audio || !duration.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }

      RefPtr<AudioData> data =
        new AudioData(offset, framePts.ToMicroseconds(),
                      duration.ToMicroseconds(), mFrame->nb_samples,
                      Move(audio), channels, rate);
      mCallback->Output(data);

      framePts += duration;
      if (!framePts.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }
    }

    packet.data += consumed;
    packet.size -= consumed;
    offset += consumed;
  }

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
/**
 * Decode the compressed bytes [aData, aData + aSize) belonging to aSample,
 * appending one AudioData per decoded frame to aResults.
 *
 * @param aSample   source sample (provides offset and presentation time).
 * @param aData     raw compressed bytes to feed the decoder.
 * @param aSize     number of bytes in aData.
 * @param aGotFrame optional out-param, set true iff at least one frame
 *                  was produced.
 * @param aResults  receives the decoded AudioData objects.
 * @return NS_OK on success, otherwise a MediaResult describing the failure.
 */
MediaResult
FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, uint8_t* aData,
                                        int aSize, bool* aGotFrame,
                                        DecodedData& aResults)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);
  packet.data = const_cast<uint8_t*>(aData);
  packet.size = aSize;

  if (aGotFrame) {
    *aGotFrame = false;
  }

  if (!PrepareFrame()) {
    return MediaResult(
      NS_ERROR_OUT_OF_MEMORY,
      RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
  }

  int64_t inputOffset = aSample->mOffset;
  media::TimeUnit framePts = aSample->mTime;

  // The buffer may contain several FFmpeg packets; loop until every byte
  // has been consumed.
  while (packet.size > 0) {
    int gotFrame = 0;
    int consumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &gotFrame, &packet);
    if (consumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("FFmpeg audio error:%d", consumed));
    }

    if (gotFrame) {
      // CopyAndPackAudio below only understands these sample formats.
      switch (mFrame->format) {
        case AV_SAMPLE_FMT_FLT:
        case AV_SAMPLE_FMT_FLTP:
        case AV_SAMPLE_FMT_S16:
        case AV_SAMPLE_FMT_S16P:
        case AV_SAMPLE_FMT_S32:
        case AV_SAMPLE_FMT_S32P:
          break;
        default:
          return MediaResult(
            NS_ERROR_DOM_MEDIA_DECODE_ERR,
            RESULT_DETAIL(
              "FFmpeg audio decoder outputs unsupported audio format"));
      }

      uint32_t channels = mCodecContext->channels;
      uint32_t rate = mCodecContext->sample_rate;

      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, channels, mFrame->nb_samples);
      if (!audio) {
        return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
      }

      media::TimeUnit duration = FramesToTimeUnit(mFrame->nb_samples, rate);
      if (!duration.IsValid()) {
        return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                           RESULT_DETAIL("Invalid sample duration"));
      }

      media::TimeUnit nextPts = framePts + duration;
      if (!nextPts.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid count of accumulated audio samples"));
      }

      aResults.AppendElement(
        new AudioData(inputOffset, framePts, duration, mFrame->nb_samples,
                      Move(audio), channels, rate,
                      mCodecContext->channel_layout));
      framePts = nextPts;

      if (aGotFrame) {
        *aGotFrame = true;
      }
    }

    packet.data += consumed;
    packet.size -= consumed;
    inputOffset += consumed;
  }
  return NS_OK;
}