void AppleATDecoder::ProcessFlush() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); mQueuedSamples.Clear(); OSStatus rv = AudioConverterReset(mConverter); if (rv) { LOG("Error %d resetting AudioConverter", rv); } }
// Reset the AudioConverter so decoding can restart cleanly after a seek
// or discontinuity.
// Returns NS_OK on success, NS_ERROR_FAILURE if the reset call fails.
nsresult AppleATDecoder::Flush() {
  LOG("Flushing AudioToolbox AAC decoder");
  // A flush before the converter exists has nothing to reset and is not an
  // error; the guarded ProcessFlush variants in this file treat it the same
  // way. Previously a null converter was passed straight to
  // AudioConverterReset, turning a benign early flush into a failure.
  if (!mConverter) {
    return NS_OK;
  }
  OSStatus rv = AudioConverterReset(mConverter);
  if (rv) {
    LOG("Error %d resetting AudioConverter", rv);
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
bool SFB::Audio::Converter::Reset() { if(!IsOpen()) return false; OSStatus result = AudioConverterReset(mConverter); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioConverterReset failed: " << result); return false; } return true; }
// Flush pending work from the task queue, reset the AudioConverter, and
// tell the task queue that an input discontinuity is coming.
// Returns NS_OK on success, NS_ERROR_FAILURE if the converter reset fails.
nsresult AppleATDecoder::Flush() {
  LOG("Flushing AudioToolbox AAC decoder");
  mTaskQueue->Flush();

  const OSStatus status = AudioConverterReset(mConverter);
  if (status != noErr) {
    LOG("Error %d resetting AudioConverter", status);
    return NS_ERROR_FAILURE;
  }

  // Notify our task queue of the coming input discontinuity.
  mTaskQueue->Dispatch(
      NS_NewRunnableMethod(this, &AppleATDecoder::SignalFlush));
  return NS_OK;
}
// Position the read cursor at the given packet. Throws
// kExtAudioFileError_InvalidSeek if the file is not open for reading or the
// packet number is negative.
void CAAudioFile::SeekToPacket(SInt64 packetNumber)
{
#if VERBOSE_IO
	printf("CAAudioFile::SeekToPacket: %qd\n", packetNumber);
#endif
	XThrowIf(mMode != kReading || packetNumber < 0 /*|| packetNumber >= mNumberPackets*/ , kExtAudioFileError_InvalidSeek, "seek to packet in audio file");

	// Seeking to the current position is a no-op; in particular we must not
	// reset the converter in that case.
	if (mPacketMark == packetNumber)
		return;

	mPacketMark = packetNumber;
	mFrameMark = PacketToFrame(packetNumber) - mFrame0Offset;
	mFramesToSkipFollowingSeek = 0;

	// The converter's internal state is stale after a seek (notably if we had
	// reached end of stream, it would refuse to produce more output), so it
	// must be reset before further conversion.
	if (mConverter)
		AudioConverterReset(mConverter);
}
// Read *numFrames frames of interleaved Float32 audio starting at frame
// *cursor, converting from the file's packet format via mAudioConverterRef.
// On return, *numFrames holds the number of frames actually converted.
// NOTE(review): only the interleaved path copies into `data`; when the client
// format is non-interleaved nothing is written to `data` — confirm callers
// never hit that case with this overload.
void AudioFile::read(Float32 *data, UInt64 *cursor, UInt32 *numFrames) {
    // Translate the frame cursor into a packet index (plus the frame offset
    // within that packet, kept in t.mFrameOffsetInPacket).
    AudioFramePacketTranslation t;
    UInt32 size = sizeof(AudioFramePacketTranslation);
    t.mFrame = *cursor;
    AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t);
    *mCursor = t.mPacket;  // member packet cursor, read back by the converter callback (presumably encoderProc)

    // Translate the requested frame count into a packet count; read at least
    // one packet.
    AudioFramePacketTranslation t2;
    t2.mFrame = *numFrames;
    AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t2);
    UInt32 numPacketsToRead = t2.mPacket ? t2.mPacket : 1;

    // Size a scratch buffer large enough for that many packets in bytes.
    AudioBytePacketTranslation t3;
    t3.mPacket = numPacketsToRead;
    size = sizeof(AudioBytePacketTranslation);
    AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketToByte, &size, &t3);
    if (mConverterBuffer) free(mConverterBuffer);
    // NOTE(review): malloc result is not checked — a failed allocation would
    // crash in AudioFileReadPackets below.
    mConverterBuffer = (char*)malloc(t3.mByte);
    mNumPacketsToRead = numPacketsToRead;

    // Pull the raw packets from the file into the scratch buffer.
    UInt32 outNumBytes;
    checkError(AudioFileReadPackets(mAudioFileID, false, &outNumBytes, mPacketDescs, *mCursor, &numPacketsToRead, mConverterBuffer), "AudioFileReadPackets");
    mConvertByteSize = outNumBytes;

    // Convert whole packets, then skip the leading frames that precede the
    // requested cursor (t.mFrameOffsetInPacket) when copying out.
    UInt32 numFramesToConvert = t.mFrameOffsetInPacket + *numFrames;
    bool interleaved = true;
    interleaved = !(mClientFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved);
    AudioBufferList* tmpbuf = AudioSourceNode::createAudioBufferList(2, interleaved, numFramesToConvert, sizeof(Float32));
    checkError(AudioConverterFillComplexBuffer(mAudioConverterRef, encoderProc, this, &numFramesToConvert, tmpbuf, NULL), "AudioConverterFillComplexBuffer");
    if (interleaved) {
        Float32* sample = (Float32*)tmpbuf->mBuffers[0].mData;
        memcpy(data, &sample[t.mFrameOffsetInPacket], numFramesToConvert * sizeof(Float32) * mClientFormat.mChannelsPerFrame);
    }
    AudioSourceNode::deleteAudioBufferList(tmpbuf);

    // Zero frames converted means the converter hit end of stream; reset it
    // so subsequent reads (e.g. after a seek) can proceed.
    if (numFramesToConvert == 0) {
        AudioConverterReset(mAudioConverterRef);
    }
    *numFrames = numFramesToConvert;
}
void AppleATDecoder::ProcessFlush() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); mQueuedSamples.Clear(); if (mConverter) { OSStatus rv = AudioConverterReset(mConverter); if (rv) { LOG("Error %d resetting AudioConverter", rv); } } if (mErrored) { mParsedFramesForAACMagicCookie = 0; mMagicCookie.Clear(); ProcessShutdown(); mErrored = false; } }
// Drop queued and decoded samples, reset the converter (if any), recover
// from a prior decode error, and resolve the flush promise.
// Must run on the decoder task queue.
RefPtr<MediaDataDecoder::FlushPromise> AppleATDecoder::ProcessFlush() {
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  mQueuedSamples.Clear();
  mDecodedSamples.Clear();

  if (mConverter) {
    const OSStatus status = AudioConverterReset(mConverter);
    if (status != noErr) {
      LOG("Error %d resetting AudioConverter", static_cast<int>(status));
    }
  }

  // After an error, discard the partially-parsed magic cookie state and shut
  // the decoder down so the next input re-creates it from scratch.
  if (mErrored) {
    mParsedFramesForAACMagicCookie = 0;
    mMagicCookie.Clear();
    ProcessShutdown();
    mErrored = false;
  }

  return FlushPromise::CreateAndResolve(true, __func__);
}
// Read *numFrames frames starting at frame *cursor into the first two
// (non-interleaved) buffers of `buf`, converting via mAudioConverterRef.
// On return, *numFrames holds the number of frames actually converted.
void AudioFile::read(AudioBufferList* buf, UInt64* cursor, UInt32* numFrames) {
    // Translate the frame cursor into a packet index for the converter's
    // input callback.
    AudioFramePacketTranslation t;
    UInt32 size = sizeof(AudioFramePacketTranslation);
    t.mFrame = *cursor;
    AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t);
    *mCursor = t.mPacket;  // member packet cursor, consumed by encoderProc

    UInt32 numFramesToRead = *numFrames;//t.mFrameOffsetInPacket + *numFrames;
    // Convert into a temporary 2-channel, non-interleaved buffer list.
    AudioBufferList* tmpbuf = AudioSourceNode::createAudioBufferList(2, false, numFramesToRead, sizeof(Float32));
    checkError(AudioConverterFillComplexBuffer(mAudioConverterRef, encoderProc, this, &numFramesToRead, tmpbuf, NULL), "AudioConverterFillComplexBuffer");
    // NOTE(review): the copies below use the *requested* *numFrames, but the
    // converter may have produced fewer (numFramesToRead is updated in
    // place) — the tail of tmpbuf would then be read uninitialized. Confirm
    // whether callers tolerate this or the copy should use numFramesToRead.
    memcpy(buf->mBuffers[0].mData, tmpbuf->mBuffers[0].mData, *numFrames * sizeof(Float32));
    memcpy(buf->mBuffers[1].mData, tmpbuf->mBuffers[1].mData, *numFrames * sizeof(Float32));
    AudioSourceNode::deleteAudioBufferList(tmpbuf);

    // Zero frames converted means the converter hit end of stream; reset it
    // so subsequent reads (e.g. after a seek) can proceed.
    if (numFramesToRead == 0) {
        AudioConverterReset(mAudioConverterRef);
    }
    *numFrames = numFramesToRead;
}
// JNI bridge: forwards AudioConverter.AudioConverterReset from Java to the
// native AudioToolbox call. The jint handle is the native converter pointer;
// the returned jint is the OSStatus result.
JNIEXPORT jint JNICALL Java_com_apple_audio_toolbox_AudioConverter_AudioConverterReset
  (JNIEnv *, jclass, jint inAudioConverter)
{
	AudioConverterRef converter = (AudioConverterRef)inAudioConverter;
	OSStatus result = AudioConverterReset(converter);
	return (jint)result;
}
// AudioUnit render callback: pulls converted audio from the source's
// AudioConverter into IOData until the requested number of packets is
// produced, zero-filling any shortfall and flagging silence when nothing
// was produced.
// NOTE(review): the byte-offset math (PacketsObtained * sizeof(Float32))
// assumes one Float32 per packet per buffer — presumably mono Float32 in
// each (deinterleaved) buffer; confirm against the converter's output format.
OSStatus FCoreAudioSoundSource::CoreAudioRenderCallback( void *InRefCon, AudioUnitRenderActionFlags *IOActionFlags, const AudioTimeStamp *InTimeStamp, UInt32 InBusNumber, UInt32 InNumberFrames, AudioBufferList *IOData )
{
	OSStatus Status = noErr;
	FCoreAudioSoundSource *Source = ( FCoreAudioSoundSource *)InRefCon;

	uint32 DataByteSize = InNumberFrames * sizeof( Float32 );
	uint32 PacketsRequested = InNumberFrames;
	uint32 PacketsObtained = 0;

	// AudioBufferList itself holds only one buffer, while AudioConverterFillComplexBuffer expects a couple of them
	struct
	{
		AudioBufferList BufferList;
		AudioBuffer AdditionalBuffers[5];
	} LocalBuffers;

	AudioBufferList *LocalBufferList = &LocalBuffers.BufferList;
	LocalBufferList->mNumberBuffers = IOData->mNumberBuffers;

	if( Source->Buffer && Source->Playing )
	{
		// Keep pulling from the converter until the request is satisfied or
		// the converter stops producing output.
		while( PacketsObtained < PacketsRequested )
		{
			int32 BufferFilledBytes = PacketsObtained * sizeof( Float32 );
			// Point the local buffer list at the not-yet-filled tail of each
			// output buffer.
			for( uint32 Index = 0; Index < LocalBufferList->mNumberBuffers; Index++ )
			{
				LocalBufferList->mBuffers[Index].mDataByteSize = DataByteSize - BufferFilledBytes;
				LocalBufferList->mBuffers[Index].mData = ( uint8 *)IOData->mBuffers[Index].mData + BufferFilledBytes;
			}

			uint32 PacketCount = PacketsRequested - PacketsObtained;
			Status = AudioConverterFillComplexBuffer( Source->CoreAudioConverter, &CoreAudioConvertCallback, InRefCon, &PacketCount, LocalBufferList, NULL );
			PacketsObtained += PacketCount;

			// No output or an error means the converter's internal state is
			// exhausted/stale — reset it so the next render can start clean.
			if( PacketCount == 0 || Status != noErr )
			{
				AudioConverterReset( Source->CoreAudioConverter );
				break;
			}
		}

		if( PacketsObtained == 0 )
		{
			*IOActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
		}
	}
	else
	{
		*IOActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
	}

	if( PacketsObtained < PacketsRequested )
	{
		// Fill the rest of buffers provided with zeroes
		int32 BufferFilledBytes = PacketsObtained * sizeof( Float32 );
		for( uint32 Index = 0; Index < IOData->mNumberBuffers; ++Index )
		{
			FMemory::Memzero( ( uint8 *)IOData->mBuffers[Index].mData + BufferFilledBytes, DataByteSize - BufferFilledBytes );
		}
	}

	return Status;
}