CASpeexDecoder::CASpeexDecoder(Boolean inSkipFormatsInitialization /* = false */) :
    mCookie(NULL), mCookieSize(0), mCompressionInitialized(false),
    mOutBuffer(NULL), mOutBufferSize(0), mOutBufferUsedSize(0), mOutBufferStart(0),
    mSpeexFPList(), mNumFrames(0), mSpeexDecoderState(NULL)
{
    // Put the stereo-decoding state into its neutral starting configuration.
    mSpeexStereoState.balance      = 1.0;
    mSpeexStereoState.e_ratio      = 0.5;
    mSpeexStereoState.smooth_left  = 1.0;
    mSpeexStereoState.smooth_right = 1.0;

    // A subclass may ask us to skip format registration so it can
    // advertise its own input/output formats instead.
    if (inSkipFormatsInitialization)
        return;

    // The only input encoding we accept is Xiph Speex.
    CAStreamBasicDescription speexInputDesc(kAudioStreamAnyRate, kAudioFormatXiphSpeex,
                                            kSpeexBytesPerPacket, kSpeexFramesPerPacket,
                                            kSpeexBytesPerFrame, kSpeexChannelsPerFrame,
                                            kSpeexBitsPerChannel, kSpeexFormatFlags);
    AddInputFormat(speexInputDesc);

    // Initial input format: 2-channel Speex at 44.1 kHz.
    mInputFormat.mSampleRate       = 44100;
    mInputFormat.mFormatID         = kAudioFormatXiphSpeex;
    mInputFormat.mFormatFlags      = kSpeexFormatFlags;
    mInputFormat.mBytesPerPacket   = kSpeexBytesPerPacket;
    mInputFormat.mFramesPerPacket  = kSpeexFramesPerPacket;
    mInputFormat.mBytesPerFrame    = kSpeexBytesPerFrame;
    mInputFormat.mChannelsPerFrame = 2;
    mInputFormat.mBitsPerChannel   = 16;

    // Two PCM output flavors are offered: 16-bit signed integer and 32-bit float.
    CAStreamBasicDescription pcm16Desc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                       0, 1, 0, 0, 16,
                                       kAudioFormatFlagsNativeEndian |
                                       kAudioFormatFlagIsSignedInteger |
                                       kAudioFormatFlagIsPacked);
    AddOutputFormat(pcm16Desc);

    CAStreamBasicDescription pcmFloatDesc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                          0, 1, 0, 0, 32,
                                          kAudioFormatFlagsNativeFloatPacked);
    AddOutputFormat(pcmFloatDesc);

    // Initial output format: stereo 32-bit native float PCM at 44.1 kHz
    // (2 channels * 4 bytes = 8 bytes per frame/packet).
    mOutputFormat.mSampleRate       = 44100;
    mOutputFormat.mFormatID         = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
    mOutputFormat.mBytesPerPacket   = 8;
    mOutputFormat.mFramesPerPacket  = 1;
    mOutputFormat.mBytesPerFrame    = 8;
    mOutputFormat.mChannelsPerFrame = 2;
    mOutputFormat.mBitsPerChannel   = 32;
}
ACAppleIMA4Encoder::ACAppleIMA4Encoder(AudioComponentInstance inInstance) :
    ACAppleIMA4Codec(kInputBufferPackets * kIMAFramesPerPacket * SizeOf32(SInt16), inInstance),
    mEndOfInput(false), mZeroPaddedOnce(false), mZeroesPadded(0)
{
    // The encoder consumes only 16-bit native-endian signed integer PCM,
    // at any sample rate and with any channel count.
    CAStreamBasicDescription pcmInputDesc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                          0, 1, 0, 0, 16,
                                          kAudioFormatFlagsNativeEndian |
                                          kAudioFormatFlagIsSignedInteger |
                                          kAudioFormatFlagIsPacked);
    AddInputFormat(pcmInputDesc);

    // Initial input format: mono 16-bit signed integer PCM at 44.1 kHz.
    mInputFormat.mSampleRate       = 44100;
    mInputFormat.mFormatID         = kAudioFormatLinearPCM;
    mInputFormat.mFormatFlags      = kAudioFormatFlagsNativeEndian |
                                     kAudioFormatFlagIsSignedInteger |
                                     kAudioFormatFlagIsPacked;
    mInputFormat.mBytesPerPacket   = 2;
    mInputFormat.mFramesPerPacket  = 1;
    mInputFormat.mBytesPerFrame    = 2;
    mInputFormat.mChannelsPerFrame = 1;
    mInputFormat.mBitsPerChannel   = 16;

    // The encoder emits only the 'DEMO' IMA4 stream format.
    CAStreamBasicDescription imaOutputDesc(kAudioStreamAnyRate, 'DEMO',
                                           0, kIMAFramesPerPacket, 0, 0, 0, 0);
    AddOutputFormat(imaOutputDesc);

    // Initial output format: mono IMA4 at 44.1 kHz. Bytes/bits per frame are 0
    // because IMA4 is a packetized (compressed) format.
    mOutputFormat.mSampleRate       = 44100;
    mOutputFormat.mFormatID         = 'DEMO';
    mOutputFormat.mFormatFlags      = 0;
    mOutputFormat.mBytesPerPacket   = kIMA4PacketBytes;
    mOutputFormat.mFramesPerPacket  = kIMAFramesPerPacket;
    mOutputFormat.mBytesPerFrame    = 0;
    mOutputFormat.mChannelsPerFrame = 1;
    mOutputFormat.mBitsPerChannel   = 0;

    // Mono and stereo are the supported channel layouts.
    mSupportedChannelTotals[0] = 1;
    mSupportedChannelTotals[1] = 2;

    // Set up per-channel predictor/step-index state.
    InitializeChannelStateList();
}
CAOggFLACDecoder::CAOggFLACDecoder() :
    CAFLACDecoder(true), // base skips format registration; we register our own below
    mFramesBufferedList(), complete_pages(0)
{
    // The only input encoding we accept is Ogg-framed FLAC.
    CAStreamBasicDescription oggFlacInputDesc(kAudioStreamAnyRate, kAudioFormatXiphOggFramedFLAC,
                                              kFLACBytesPerPacket, kFLACFramesPerPacket,
                                              kFLACBytesPerFrame, kFLACChannelsPerFrame,
                                              kFLACBitsPerChannel, kFLACFormatFlags);
    AddInputFormat(oggFlacInputDesc);

    // Initial input format: 2-channel Ogg FLAC at 44.1 kHz.
    mInputFormat.mSampleRate       = 44100;
    mInputFormat.mFormatID         = kAudioFormatXiphOggFramedFLAC;
    mInputFormat.mFormatFlags      = kFLACFormatFlags;
    mInputFormat.mBytesPerPacket   = kFLACBytesPerPacket;
    mInputFormat.mFramesPerPacket  = kFLACFramesPerPacket;
    mInputFormat.mBytesPerFrame    = kFLACBytesPerFrame;
    mInputFormat.mChannelsPerFrame = 2;
    mInputFormat.mBitsPerChannel   = kFLACBitsPerChannel;

    // Two PCM output flavors are offered: 16-bit signed integer and 32-bit float.
    CAStreamBasicDescription pcm16Desc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                       0, 1, 0, 0, 16,
                                       kAudioFormatFlagsNativeEndian |
                                       kAudioFormatFlagIsSignedInteger |
                                       kAudioFormatFlagIsPacked);
    AddOutputFormat(pcm16Desc);

    CAStreamBasicDescription pcmFloatDesc(kAudioStreamAnyRate, kAudioFormatLinearPCM,
                                          0, 1, 0, 0, 32,
                                          kAudioFormatFlagsNativeFloatPacked);
    AddOutputFormat(pcmFloatDesc);

    // Initial output format: stereo 32-bit native float PCM at 44.1 kHz
    // (2 channels * 4 bytes = 8 bytes per frame/packet).
    mOutputFormat.mSampleRate       = 44100;
    mOutputFormat.mFormatID         = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
    mOutputFormat.mBytesPerPacket   = 8;
    mOutputFormat.mFramesPerPacket  = 1;
    mOutputFormat.mBytesPerFrame    = 8;
    mOutputFormat.mChannelsPerFrame = 2;
    mOutputFormat.mBitsPerChannel   = 32;
}