nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                MediaFormat::Param aFormat, const TimeUnit& aDuration)
{
  // The output on Android is always 16-bit signed PCM.
  nsresult rv;
  int32_t numChannels;
  NS_ENSURE_SUCCESS(rv =
      aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels),
      rv);
  AudioConfig::ChannelLayout layout(numChannels);
  if (!layout.IsValid()) {
    return NS_ERROR_FAILURE;
  }

  int32_t sampleRate;
  NS_ENSURE_SUCCESS(rv =
      aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);

  int32_t size;
  NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

  int32_t offset;
  NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

#ifdef MOZ_SAMPLE_TYPE_S16
  const int32_t numSamples = size / 2;
#else
#error We only support 16-bit integer PCM
#endif

  const int32_t numFrames = numSamples / numChannels;
  AlignedAudioBuffer audio(numSamples);
  if (!audio) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
  PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
          numSamples);

  int64_t presentationTimeUs;
  NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

  RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
                                         aDuration.ToMicroseconds(),
                                         numFrames, Move(audio),
                                         numChannels, sampleRate);
  INVOKE_CALLBACK(Output, data);
  return NS_OK;
}
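The bookkeeping above converts MediaCodec's byte-oriented BufferInfo into sample and frame counts. Below is a minimal standalone sketch of the same arithmetic with no Gecko types; CopyPcm and the test data are hypothetical, and the final assert assumes a little-endian host, as on Android.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// MediaCodec reports the payload in bytes; 16-bit PCM has two bytes per
// sample, and one frame holds one sample per channel.
std::vector<int16_t> CopyPcm(const uint8_t* aBuffer, int32_t aOffset,
                             int32_t aSize, int32_t aNumChannels)
{
  const int32_t numSamples = aSize / 2;                // bytes   -> samples
  const int32_t numFrames = numSamples / aNumChannels; // samples -> frames
  assert(numFrames * aNumChannels == numSamples);      // no partial frames

  std::vector<int16_t> audio(numSamples);
  // The valid payload starts aOffset bytes into the output buffer.
  std::memcpy(audio.data(), aBuffer + aOffset, numSamples * sizeof(int16_t));
  return audio;
}

int main()
{
  // Four bytes of leading padding, then 8 bytes = 4 samples = 2 stereo frames.
  const uint8_t buffer[12] = {0, 0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0};
  std::vector<int16_t> pcm = CopyPcm(buffer, 4, 8, 2);
  assert(pcm.size() == 4 && pcm[0] == 1 && pcm[3] == 4);
  return 0;
}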
AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
                 MediaDataDecoderCallback* aCallback)
  : MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
                          aFormat, aCallback)
{
  JNIEnv* env = GetJNIForThread();
  jni::Object::LocalRef buffer(env);
  NS_ENSURE_SUCCESS_VOID(
      aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"), &buffer));

  if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
    buffer = jni::Object::LocalRef::Adopt(
        env,
        env->NewDirectByteBuffer(aConfig.mCodecSpecificConfig->Elements(),
                                 aConfig.mCodecSpecificConfig->Length()));
    NS_ENSURE_SUCCESS_VOID(
        aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
  }
}
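One subtlety in this fallback path: NewDirectByteBuffer does not copy. The resulting java.nio.ByteBuffer aliases the native storage, so mCodecSpecificConfig must stay alive for as long as the MediaFormat (and the codec behind it) may read "csd-0". A minimal sketch of that contract using only standard JNI; WrapConfigBlob is a hypothetical helper, not Gecko code.

#include <jni.h>

#include <cstdint>
#include <vector>

// Wraps native bytes in a direct java.nio.ByteBuffer without copying.
// NewDirectByteBuffer records only the address and capacity, so the caller
// must guarantee that aBytes outlives every Java-side use of the buffer.
jobject WrapConfigBlob(JNIEnv* aEnv, std::vector<uint8_t>& aBytes)
{
  return aEnv->NewDirectByteBuffer(aBytes.data(),
                                   static_cast<jlong>(aBytes.size()));
}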
AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
                 MediaDataDecoderCallback* aCallback,
                 const nsString& aDrmStubId)
  : MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
                          aFormat, aCallback, aDrmStubId)
{
  JNIEnv* const env = jni::GetEnvForThread();
  jni::ByteBuffer::LocalRef buffer(env);
  NS_ENSURE_SUCCESS_VOID(
      aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"), &buffer));

  if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
    buffer = jni::ByteBuffer::New(aConfig.mCodecSpecificConfig->Elements(),
                                  aConfig.mCodecSpecificConfig->Length());
    NS_ENSURE_SUCCESS_VOID(
        aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
  }
}
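For AAC, the "csd-0" blob is the AudioSpecificConfig, which is why the code requires at least two bytes: the object type, sampling-frequency index, and channel configuration all fit in the first sixteen bits. A standalone sketch of that layout follows; ParseAudioSpecificConfig is illustrative, not part of Gecko.

#include <cstdint>
#include <cstdio>

// Decodes the first two bytes of an AAC AudioSpecificConfig, the kind of
// blob that typically lands in MediaFormat's "csd-0" key:
//   5 bits audio object type, 4 bits sampling-frequency index,
//   4 bits channel configuration.
struct AacConfig {
  uint8_t objectType;
  uint8_t frequencyIndex;
  uint8_t channelConfig;
};

AacConfig ParseAudioSpecificConfig(const uint8_t* aCsd)
{
  AacConfig config;
  config.objectType = aCsd[0] >> 5 ? aCsd[0] >> 3 : aCsd[0] >> 3;
  config.objectType = aCsd[0] >> 3;
  config.frequencyIndex = ((aCsd[0] & 0x07) << 1) | (aCsd[1] >> 7);
  config.channelConfig = (aCsd[1] >> 3) & 0x0F;
  return config;
}

int main()
{
  // 0x12 0x10 = AAC-LC (object type 2), 44.1 kHz (index 4), stereo (2).
  const uint8_t csd[2] = {0x12, 0x10};
  AacConfig c = ParseAudioSpecificConfig(csd);
  std::printf("object=%u freqIndex=%u channels=%u\n",
              c.objectType, c.frequencyIndex, c.channelConfig);
  return 0;
}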
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                MediaFormat::Param aFormat, const media::TimeUnit& aDuration)
{
  // The output on Android is always 16-bit signed PCM.
  nsresult rv;
  int32_t numChannels;
  NS_ENSURE_SUCCESS(rv =
      aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels),
      rv);

  int32_t sampleRate;
  NS_ENSURE_SUCCESS(rv =
      aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);

  int32_t size;
  NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

  int32_t offset;
  NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

  // size is in bytes; two bytes per 16-bit sample.
  const int32_t numSamples = size / 2;
  const int32_t numFrames = numSamples / numChannels;

  AudioDataValue* audio = new AudioDataValue[numSamples];
  const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
  PodCopy(audio, reinterpret_cast<const AudioDataValue*>(bufferStart),
          numSamples);

  int64_t presentationTimeUs;
  NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

  nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
                                           aDuration.ToMicroseconds(),
                                           numFrames, audio, numChannels,
                                           sampleRate);
  ENVOKE_CALLBACK(Output, data);
  return NS_OK;
}
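The duration handed to AudioData should agree with the frame count and sample rate computed above. Below is a quick standalone check of that relationship; FramesToUsecsSketch is a simplified stand-in for the kind of overflow-checked conversion helper Gecko uses, not the real API.

#include <cassert>
#include <cstdint>

// Converts a frame count to a duration in microseconds. Unlike a production
// helper, this does no overflow checking; it is only a sketch.
int64_t FramesToUsecsSketch(int64_t aFrames, int32_t aSampleRate)
{
  return (aFrames * 1000000) / aSampleRate;
}

int main()
{
  // A typical AAC packet: 1024 frames at 44100 Hz is about 23.2 ms.
  assert(FramesToUsecsSketch(1024, 44100) == 23219);
  return 0;
}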