/// Constructs a Speex decoder component.
/// Seeds the stereo-intensity state with neutral defaults (matches speex's
/// SPEEX_STEREO_STATE_INIT values), then — unless the caller opts out —
/// registers the supported input/output stream formats and picks defaults.
///
/// @param inSkipFormatsInitialization  When true (used by subclasses such as
///        the Ogg-framed variant), skip format registration so the subclass
///        can register its own formats instead.
CASpeexDecoder::CASpeexDecoder(Boolean inSkipFormatsInitialization /* = false */) :
    mCookie(NULL),
    mCookieSize(0),
    mCompressionInitialized(false),
    mOutBuffer(NULL),
    mOutBufferSize(0),
    mOutBufferUsedSize(0),
    mOutBufferStart(0),
    mSpeexFPList(),
    mNumFrames(0),
    mSpeexDecoderState(NULL)
{
    // Neutral stereo decode state: centered balance, 0.5 energy ratio,
    // smoothing gains at unity.
    mSpeexStereoState.balance      = 1.0;
    mSpeexStereoState.e_ratio      = 0.5;
    mSpeexStereoState.smooth_left  = 1.0;
    mSpeexStereoState.smooth_right = 1.0;

    if (inSkipFormatsInitialization)
        return;

    // Advertise the compressed Speex input format.
    CAStreamBasicDescription speexInputDesc(kAudioStreamAnyRate, kAudioFormatXiphSpeex,
                                            kSpeexBytesPerPacket, kSpeexFramesPerPacket,
                                            kSpeexBytesPerFrame, kSpeexChannelsPerFrame,
                                            kSpeexBitsPerChannel, kSpeexFormatFlags);
    AddInputFormat(speexInputDesc);

    // Default input: 44.1 kHz stereo Speex.
    // NOTE(review): channels (2) and bits (16) are set directly rather than via
    // the kSpeex* constants — presumably deliberate defaults; confirm.
    mInputFormat.mSampleRate       = 44100;
    mInputFormat.mFormatID         = kAudioFormatXiphSpeex;
    mInputFormat.mFormatFlags      = kSpeexFormatFlags;
    mInputFormat.mBytesPerPacket   = kSpeexBytesPerPacket;
    mInputFormat.mFramesPerPacket  = kSpeexFramesPerPacket;
    mInputFormat.mBytesPerFrame    = kSpeexBytesPerFrame;
    mInputFormat.mChannelsPerFrame = 2;
    mInputFormat.mBitsPerChannel   = 16;

    // Advertise both PCM output flavors: 16-bit signed integer and 32-bit float.
    CAStreamBasicDescription pcm16Desc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                       0, 1, 0, 0, 16,
                                       kAudioFormatFlagsNativeEndian |
                                       kAudioFormatFlagIsSignedInteger |
                                       kAudioFormatFlagIsPacked);
    AddOutputFormat(pcm16Desc);

    CAStreamBasicDescription pcmFloatDesc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                          0, 1, 0, 0, 32,
                                          kAudioFormatFlagsNativeFloatPacked);
    AddOutputFormat(pcmFloatDesc);

    // Default output: 44.1 kHz stereo float32 (2 ch * 4 bytes = 8 bytes/frame).
    mOutputFormat.mSampleRate       = 44100;
    mOutputFormat.mFormatID         = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
    mOutputFormat.mBytesPerPacket   = 8;
    mOutputFormat.mFramesPerPacket  = 1;
    mOutputFormat.mBytesPerFrame    = 8;
    mOutputFormat.mChannelsPerFrame = 2;
    mOutputFormat.mBitsPerChannel   = 32;
}
/// Constructs an Ogg-framed FLAC decoder component.
/// Delegates to the base FLAC decoder with format initialization skipped
/// (the `true` argument), then registers the Ogg-framed FLAC input format and
/// the two PCM output formats itself.
CAOggFLACDecoder::CAOggFLACDecoder() :
    CAFLACDecoder(true),        // base skips its own format registration
    mFramesBufferedList(),
    complete_pages(0)
{
    // Advertise the Ogg-framed FLAC input format.
    CAStreamBasicDescription oggFlacInputDesc(kAudioStreamAnyRate, kAudioFormatXiphOggFramedFLAC,
                                              kFLACBytesPerPacket, kFLACFramesPerPacket,
                                              kFLACBytesPerFrame, kFLACChannelsPerFrame,
                                              kFLACBitsPerChannel, kFLACFormatFlags);
    AddInputFormat(oggFlacInputDesc);

    // Default input: 44.1 kHz stereo Ogg FLAC.
    // NOTE(review): channel count (2) is set directly instead of using
    // kFLACChannelsPerFrame — presumably a deliberate default; confirm.
    mInputFormat.mSampleRate       = 44100;
    mInputFormat.mFormatID         = kAudioFormatXiphOggFramedFLAC;
    mInputFormat.mFormatFlags      = kFLACFormatFlags;
    mInputFormat.mBytesPerPacket   = kFLACBytesPerPacket;
    mInputFormat.mFramesPerPacket  = kFLACFramesPerPacket;
    mInputFormat.mBytesPerFrame    = kFLACBytesPerFrame;
    mInputFormat.mChannelsPerFrame = 2;
    mInputFormat.mBitsPerChannel   = kFLACBitsPerChannel;

    // Advertise both PCM output flavors: 16-bit signed integer and 32-bit float.
    CAStreamBasicDescription pcm16Desc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                       0, 1, 0, 0, 16,
                                       kAudioFormatFlagsNativeEndian |
                                       kAudioFormatFlagIsSignedInteger |
                                       kAudioFormatFlagIsPacked);
    AddOutputFormat(pcm16Desc);

    CAStreamBasicDescription pcmFloatDesc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                          0, 1, 0, 0, 32,
                                          kAudioFormatFlagsNativeFloatPacked);
    AddOutputFormat(pcmFloatDesc);

    // Default output: 44.1 kHz stereo float32 (2 ch * 4 bytes = 8 bytes/frame).
    mOutputFormat.mSampleRate       = 44100;
    mOutputFormat.mFormatID         = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
    mOutputFormat.mBytesPerPacket   = 8;
    mOutputFormat.mFramesPerPacket  = 1;
    mOutputFormat.mBytesPerFrame    = 8;
    mOutputFormat.mChannelsPerFrame = 2;
    mOutputFormat.mBitsPerChannel   = 32;
}