Example #1
CASpeexDecoder::CASpeexDecoder(Boolean inSkipFormatsInitialization /* = false */) :
    mCookie(NULL), mCookieSize(0), mCompressionInitialized(false),
    mOutBuffer(NULL), mOutBufferSize(0), mOutBufferUsedSize(0), mOutBufferStart(0),
    mSpeexFPList(),
    mNumFrames(0),
    mSpeexDecoderState(NULL)
{
    mSpeexStereoState.balance = 1.0;
    mSpeexStereoState.e_ratio = 0.5;
    mSpeexStereoState.smooth_left = 1.0;
    mSpeexStereoState.smooth_right = 1.0;

    if (inSkipFormatsInitialization)
        return;

    CAStreamBasicDescription theInputFormat(kAudioStreamAnyRate, kAudioFormatXiphSpeex,
                                            kSpeexBytesPerPacket, kSpeexFramesPerPacket,
                                            kSpeexBytesPerFrame, kSpeexChannelsPerFrame,
                                            kSpeexBitsPerChannel, kSpeexFormatFlags);
    AddInputFormat(theInputFormat);

    mInputFormat.mSampleRate = 44100;
    mInputFormat.mFormatID = kAudioFormatXiphSpeex;
    mInputFormat.mFormatFlags = kSpeexFormatFlags;
    mInputFormat.mBytesPerPacket = kSpeexBytesPerPacket;
    mInputFormat.mFramesPerPacket = kSpeexFramesPerPacket;
    mInputFormat.mBytesPerFrame = kSpeexBytesPerFrame;
    mInputFormat.mChannelsPerFrame = 2;
    mInputFormat.mBitsPerChannel = 16;

    CAStreamBasicDescription theOutputFormat1(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 16,
                                              kAudioFormatFlagsNativeEndian |
                                              kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked);
    AddOutputFormat(theOutputFormat1);
    CAStreamBasicDescription theOutputFormat2(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 32,
                                              kAudioFormatFlagsNativeFloatPacked);
    AddOutputFormat(theOutputFormat2);

    mOutputFormat.mSampleRate = 44100;
    mOutputFormat.mFormatID = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
    mOutputFormat.mBytesPerPacket = 8;
    mOutputFormat.mFramesPerPacket = 1;
    mOutputFormat.mBytesPerFrame = 8;
    mOutputFormat.mChannelsPerFrame = 2;
    mOutputFormat.mBitsPerChannel = 32;
}
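For context, here is what the default output format installed above amounts to when written out as a plain CoreAudio AudioStreamBasicDescription: 44100 Hz, stereo, packed native-endian 32-bit float, which gives 8 bytes per frame. This is only a minimal sketch; the helper name makeFloat32Desc is hypothetical and not part of CASpeexDecoder.

#include <CoreAudio/CoreAudioTypes.h>

// Hypothetical helper (illustration only): builds the packed native-endian
// float32 PCM description the constructor above installs as its default
// output format.
static AudioStreamBasicDescription makeFloat32Desc(Float64 sampleRate, UInt32 channels)
{
    AudioStreamBasicDescription desc = {};
    desc.mSampleRate       = sampleRate;                  // 44100 in the constructor
    desc.mFormatID         = kAudioFormatLinearPCM;
    desc.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
    desc.mBitsPerChannel   = 32;
    desc.mChannelsPerFrame = channels;                    // 2 in the constructor
    desc.mBytesPerFrame    = channels * (32 / 8);         // 2 * 4 = 8
    desc.mFramesPerPacket  = 1;                           // PCM: one frame per packet
    desc.mBytesPerPacket   = desc.mBytesPerFrame * desc.mFramesPerPacket;
    return desc;
}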
Example #2
CAOggFLACDecoder::CAOggFLACDecoder() :
    CAFLACDecoder(true),
    mFramesBufferedList(),
    complete_pages(0)
{
    CAStreamBasicDescription theInputFormat(kAudioStreamAnyRate, kAudioFormatXiphOggFramedFLAC,
                                            kFLACBytesPerPacket, kFLACFramesPerPacket,
                                            kFLACBytesPerFrame, kFLACChannelsPerFrame,
                                            kFLACBitsPerChannel, kFLACFormatFlags);
    AddInputFormat(theInputFormat);

    mInputFormat.mSampleRate = 44100;
    mInputFormat.mFormatID = kAudioFormatXiphOggFramedFLAC;
    mInputFormat.mFormatFlags = kFLACFormatFlags;
    mInputFormat.mBytesPerPacket = kFLACBytesPerPacket;
    mInputFormat.mFramesPerPacket = kFLACFramesPerPacket;
    mInputFormat.mBytesPerFrame = kFLACBytesPerFrame;
    mInputFormat.mChannelsPerFrame = 2;
    mInputFormat.mBitsPerChannel = kFLACBitsPerChannel;

    CAStreamBasicDescription theOutputFormat1(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 16,
                                              kAudioFormatFlagsNativeEndian |
                                              kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked);
    AddOutputFormat(theOutputFormat1);
    CAStreamBasicDescription theOutputFormat2(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 32,
                                              kAudioFormatFlagsNativeFloatPacked);
    AddOutputFormat(theOutputFormat2);

    mOutputFormat.mSampleRate = 44100;
    mOutputFormat.mFormatID = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
    mOutputFormat.mBytesPerPacket = 8;
    mOutputFormat.mFramesPerPacket = 1;
    mOutputFormat.mBytesPerFrame = 8;
    mOutputFormat.mChannelsPerFrame = 2;
    mOutputFormat.mBitsPerChannel = 32;
}
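The complete_pages member initialized above suggests this decoder counts fully reassembled Ogg pages before handing FLAC packets to the base CAFLACDecoder. The following is only a minimal libogg sketch of how complete pages are typically pulled out of a raw byte stream; it is not code from CAOggFLACDecoder, and the function name CountCompletePages is made up for illustration.

#include <ogg/ogg.h>
#include <string.h>

// Illustration only: feed raw bytes into libogg's sync layer and count the
// complete pages that come out -- the kind of bookkeeping the
// complete_pages member implies.
static long CountCompletePages(const char* bytes, long length)
{
    ogg_sync_state sync;
    ogg_sync_init(&sync);

    char* buffer = ogg_sync_buffer(&sync, length);    // borrow a buffer from libogg
    memcpy(buffer, bytes, length);
    ogg_sync_wrote(&sync, length);                    // report how much was copied in

    long pages = 0;
    ogg_page page;
    while (ogg_sync_pageout(&sync, &page) == 1)       // 1 == a complete page was returned
        ++pages;

    ogg_sync_clear(&sync);
    return pages;
}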
Example #3
ACFLACEncoder::ACFLACEncoder(OSType theSubType)
:
	ACFLACCodec(kInputBufferPackets * kFramesPerPacket * sizeof(SInt16), theSubType)
{	
	//	This encoder only accepts (16- or 24-bit) native endian signed integers as its input,
	//	but can handle any sample rate and any number of channels
	CAStreamBasicDescription theInputFormat1(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 16, kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked);
	AddInputFormat(theInputFormat1);
	
	CAStreamBasicDescription theInputFormat2(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 24, kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked);
	AddInputFormat(theInputFormat2);

	// These are some additional formats that FLAC can support
	//CAStreamBasicDescription theInputFormat3(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 32, kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked);
	//AddInputFormat(theInputFormat3);

	//CAStreamBasicDescription theInputFormat4(kAudioStreamAnyRate, kAudioFormatLinearPCM, 0, 1, 0, 0, 20, kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsAlignedHigh);
	//AddInputFormat(theInputFormat4);
	
	//	set our initial input format to stereo 16 bit native endian signed integer at a 44100 sample rate
	mInputFormat.mSampleRate = 44100;
	mInputFormat.mFormatID = kAudioFormatLinearPCM;
	mInputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	mInputFormat.mBytesPerPacket = 4;
	mInputFormat.mFramesPerPacket = 1;
	mInputFormat.mBytesPerFrame = 4;
	mInputFormat.mChannelsPerFrame = 2;
	mInputFormat.mBitsPerChannel = 16;
	
	//	This encoder only puts out a FLAC stream
	CAStreamBasicDescription theOutputFormat1(kAudioStreamAnyRate, kAudioFormatFLAC, 0, kFramesPerPacket, 0, 0, 0, 0);
	AddOutputFormat(theOutputFormat1);

	//	set our initial output format to stereo FLAC at a 44100 sample rate -- note the 16 bit source bit depth (kFLACFormatFlag_16BitSourceData)
	mOutputFormat.mSampleRate = 44100;
	mOutputFormat.mFormatFlags = kFLACFormatFlag_16BitSourceData;
	mOutputFormat.mBytesPerPacket = 0;
	mOutputFormat.mFramesPerPacket = kFramesPerPacket;
	mOutputFormat.mFormatID = kAudioFormatFLAC;
	mOutputFormat.mBytesPerFrame = 0;
	mOutputFormat.mChannelsPerFrame = 2;
	mOutputFormat.mBitsPerChannel = 0;
	
	mSupportedChannelTotals[0] = 1;
	mSupportedChannelTotals[1] = 2;
	mSupportedChannelTotals[2] = 3;
	mSupportedChannelTotals[3] = 4;
	mSupportedChannelTotals[4] = 5;
	mSupportedChannelTotals[5] = 6;
	mSupportedChannelTotals[6] = 7;
	mSupportedChannelTotals[7] = 8;
	
	mPacketInInputBuffer = false;

	mFormat = 0;

	mTotalBytesGenerated = 0;
	
	mQuality = 0; // Compression Quality
	mInputBufferBytesUsed = 0;
	mFlushPacket = false;
	mFinished = false;
	mTrailingFrames = 0;
	mBitDepth = 16;
	mEncoder = FLAC__stream_encoder_new();
	mEncoderState = FLAC__stream_encoder_get_state(mEncoder);
}