void ACFLACCodec::SetMagicCookie(const void* inMagicCookieData, UInt32 inMagicCookieDataByteSize)
{
	//	Stores the FLAC magic cookie and parses it into mStreamInfo.
	//	The cookie may only be set before the codec is initialized, and it
	//	must fit in the fixed-size mMagicCookie member (256 bytes).
	if(mIsInitialized)
	{
		CODEC_THROW(kAudioCodecStateError);
	}
	if(inMagicCookieDataByteSize > 256) // the largest cookie we can store
	{
		CODEC_THROW(kAudioCodecBadPropertySizeError);
	}

	//	store the cookie (the throw above already exited on oversized cookies,
	//	so no else branch is needed)
	memcpy(mMagicCookie, inMagicCookieData, inMagicCookieDataByteSize);
	mMagicCookieLength = inMagicCookieDataByteSize;
	mCookieSet = 1;

	//	extract the stream parameters from the cookie
	ParseMagicCookie(inMagicCookieData, inMagicCookieDataByteSize, &mStreamInfo);

	//	an empty cookie counts as "not defined"
	mCookieDefined = (inMagicCookieDataByteSize > 0);
}
//	Validates and adopts a new input format for the IMA4 encoder.
//	Only 16-bit native-endian signed-integer packed PCM is accepted, with a
//	non-negative sample rate and at most kMaxIMA4Channels channels.
//	NOTE(review): this function is truncated in this source chunk — the
//	`else` branch opened at the bottom has no body and the closing brace of
//	the function is missing. Do not edit logic here without the full file.
void	ACAppleIMA4Encoder::SetCurrentInputFormat(const AudioStreamBasicDescription& inInputFormat)
{
	if(!mIsInitialized)
	{
		//	check to make sure the input format is legal
		if(	(inInputFormat.mFormatID != kAudioFormatLinearPCM) ||
			(inInputFormat.mFormatFlags != (kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked)) ||
			(inInputFormat.mBitsPerChannel != 16))
		{
	#if VERBOSE
			DebugMessage("ACAppleIMA4Encoder::SetCurrentInputFormat: only support 16 bit native endian signed integer for input");
	#endif
			CODEC_THROW(kAudioCodecUnsupportedFormatError);
		}
		
		// Do some basic sanity checking
		if(inInputFormat.mSampleRate < 0.0)
		{
	#if VERBOSE
			DebugMessage("ACAppleIMA4Encoder::SetCurrentInputFormat: input sample rates may not be negative");
	#endif
			CODEC_THROW(kAudioCodecUnsupportedFormatError);
		}
		
		if(inInputFormat.mChannelsPerFrame > kMaxIMA4Channels)
		{
	#if VERBOSE
			DebugMessage("ACAppleIMA4Encoder::SetCurrentInputFormat: only supports mono or stereo");
	#endif
			CODEC_THROW(kAudioCodecUnsupportedFormatError);
		}
		
		//	tell our base class about the new format
		ACAppleIMA4Codec::SetCurrentInputFormat(inInputFormat);
		// The encoder does no sample rate conversion nor channel manipulation
		// so a zero (wildcard) channel count or sample rate on one side is
		// filled in from the other side.
		if (inInputFormat.mChannelsPerFrame == 0)
		{
			mInputFormat.mChannelsPerFrame = mOutputFormat.mChannelsPerFrame;
		}
		else
		{
			mOutputFormat.mChannelsPerFrame = mInputFormat.mChannelsPerFrame;
		}
		if (inInputFormat.mSampleRate == 0.0)
		{
			mInputFormat.mSampleRate = mOutputFormat.mSampleRate;
		}
		else
		{
			mOutputFormat.mSampleRate = mInputFormat.mSampleRate;
		}
		// Fix derived values
		mInputFormat.mBytesPerFrame = mInputFormat.mBytesPerPacket = (mInputFormat.mBitsPerChannel >> 3) * mInputFormat.mChannelsPerFrame;
		mInputFormat.mFramesPerPacket = 1;
		
		// Zero out everything that has to be zero
		mInputFormat.mReserved = 0;
	}
	else
	{
	//	NOTE(review): truncated here — else-branch body and closing brace
	//	are missing from this chunk (presumably CODEC_THROW(kAudioCodecStateError)).
void ACFLACCodec::SetProperty(AudioCodecPropertyID inPropertyID, UInt32 inPropertyDataSize, const void* inPropertyData)
{
	//	Writes a codec property. Only the current input sample rate is
	//	writable here; several FLAC-specific properties are explicitly
	//	read-only, and everything else is delegated to the base class.
	switch(inPropertyID)
	{
		case kAudioCodecPropertyCurrentInputSampleRate:
			//	the input sample rate may only change while uninitialized
			if(mIsInitialized)
			{
				CODEC_THROW(kAudioCodecIllegalOperationError);
			}
			if(inPropertyDataSize != sizeof(Float64))
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			//	named cast instead of the original C-style cast
			mInputFormat.mSampleRate = *reinterpret_cast<const Float64*>(inPropertyData);
			break;

		//	these properties exist but cannot be written on this codec
		case kAudioCodecPropertyFormatInfo:
		case kAudioCodecPropertyHasVariablePacketByteSizes:
		case kAudioCodecPropertyCurrentOutputSampleRate:
		case kAudioCodecPropertyAvailableInputChannelLayouts:
		case kAudioCodecPropertyAvailableOutputChannelLayouts:
		case kAudioCodecPropertyPacketFrameSize:
		case kAudioCodecPropertyMaximumPacketByteSize:
			CODEC_THROW(kAudioCodecIllegalOperationError);
			break;

		default:
			ACBaseCodec::SetProperty(inPropertyID, inPropertyDataSize, inPropertyData);
			break;
	}
}
//	Appends as much of the caller's input as fits into the internal ring
//	buffer, clamped to whole packets (CBR only — uses mBytesPerPacket).
//	On return ioInputDataByteSize / ioNumberPackets report what was consumed.
//	The packet descriptions are ignored by this buffer-management code.
void	ACSimpleCodec::AppendInputData(const void* inInputData, UInt32& ioInputDataByteSize, UInt32& ioNumberPackets, const AudioStreamPacketDescription* inPacketDescription)
{
	//	this buffer handling code doesn't care about such things as the packet descriptions
	if(!mIsInitialized) CODEC_THROW(kAudioCodecStateError);
	
	//	this is a ring buffer we're dealing with, so we need to set up a few things
	UInt32 theUsedByteSize = GetUsedInputBufferByteSize();
	UInt32 theAvailableByteSize = GetInputBufferByteSize() - theUsedByteSize;

	UInt32 theMaxAvailableInputBytes = ioInputDataByteSize; // we can't consume more than we get

	const Byte* theInputData = static_cast<const Byte*>(inInputData);
	
	// >>jamesmcc: added this because ioNumberPackets was not being updated if less was taken than given.
	// THIS ASSUMES CBR!
	// (division is safe: ACSimpleCodec::Initialize rejects mBytesPerPacket == 0)
	UInt32 bytesPerPacketOfInput = mInputFormat.mBytesPerPacket;
	UInt32 theAvailablePacketSize = theAvailableByteSize / bytesPerPacketOfInput;
	
	//	take the smaller of "packets offered" and "packets that fit"
	UInt32 minPacketSize = ioNumberPackets < theAvailablePacketSize ? ioNumberPackets : theAvailablePacketSize;
	UInt32 minByteSize = minPacketSize * bytesPerPacketOfInput;
	
	//	we can copy only as much data as there is or up to how much space is availiable
	ioNumberPackets = minPacketSize;
	ioInputDataByteSize = minByteSize;
	
	// ioInputDataByteSize had better be <= to theMaxAvailableInputBytes or we're screwed
	if (ioInputDataByteSize > theMaxAvailableInputBytes)
	{
		CODEC_THROW(kAudioCodecStateError);
	}
	// <<jamesmcc 
	
	//	now we have to copy the data taking into account the wrap around and where the start is
	if(mInputBufferEnd + ioInputDataByteSize < mInputBufferByteSize)
	{
		//	no wrap around here
		memcpy(mInputBuffer + mInputBufferEnd, theInputData, ioInputDataByteSize);
		
		//	adjust the end point
		mInputBufferEnd += ioInputDataByteSize;
	}
	else
	{
		//	the copy will wrap
		
		//	copy the first part (from the current end to the physical end of the buffer)
		UInt32 theBeforeWrapByteSize = mInputBufferByteSize - mInputBufferEnd;
		memcpy(mInputBuffer + mInputBufferEnd, theInputData, theBeforeWrapByteSize);
		
		//	and the rest (wrapped to the front of the buffer; may be 0 bytes
		//	when the copy lands exactly on the buffer boundary)
		UInt32 theAfterWrapByteSize = ioInputDataByteSize - theBeforeWrapByteSize;
		memcpy(mInputBuffer, theInputData + theBeforeWrapByteSize, theAfterWrapByteSize);
		
		//	adjust the end point
		mInputBufferEnd = theAfterWrapByteSize;
	}
	
}
// ==================== Example #5 ====================
void CAOggFLACDecoder::SetCurrentInputFormat(const AudioStreamBasicDescription& inInputFormat)
{
    // the input format is locked once the codec has been initialized
    if (mIsInitialized)
        CODEC_THROW(kAudioCodecStateError);

    // the only input we accept is Ogg-framed Xiph FLAC
    if (inInputFormat.mFormatID != kAudioFormatXiphOggFramedFLAC) {
        dbg_printf("CAOggFLACDecoder::SetFormats: only support Xiph FLAC (Ogg-framed) for input\n");
        CODEC_THROW(kAudioCodecUnsupportedFormatError);
    }

    // hand the validated format to the base class
    XCACodec::SetCurrentInputFormat(inInputFormat);
}
// ==================== Example #6 ====================
//	Reads the A/52 codec's properties; anything not handled here goes to
//	the base class. Every fixed-size property validates ioPropertyDataSize
//	up front and throws kAudioCodecBadPropertySizeError on mismatch.
void	ACShepA52Codec::GetProperty(AudioCodecPropertyID inPropertyID, UInt32& ioPropertyDataSize, void* outPropertyData) {
    switch(inPropertyID) {
    case kAudioCodecPropertyManufacturerCFString:
    {
        if (ioPropertyDataSize != sizeof(CFStringRef)) {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        //	caller takes ownership of the returned CFString
        *(CFStringRef*)outPropertyData = CFCopyLocalizedStringFromTableInBundle(CFSTR("Shepmaster Productions"), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
        break;
    }

    case kAudioCodecPropertyMaximumPacketByteSize:
        if(ioPropertyDataSize != sizeof(UInt32)) {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        *reinterpret_cast<UInt32*>(outPropertyData) = 3840; //Stolen from liba52 docs
        break;

    case kAudioCodecPropertyRequiresPacketDescription:
        if(ioPropertyDataSize != sizeof(UInt32)) {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        *reinterpret_cast<UInt32*>(outPropertyData) = 0;
        break;

    case kAudioCodecPropertyHasVariablePacketByteSizes:
        if(ioPropertyDataSize != sizeof(UInt32)) {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        *reinterpret_cast<UInt32*>(outPropertyData) = 1;
        break;

    case kAudioCodecPropertyPacketFrameSize:
        if(ioPropertyDataSize != sizeof(UInt32)) {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        *reinterpret_cast<UInt32*>(outPropertyData) = 6 * 256; // A frame has 6 blocks of 256 samples
        break;

    default:
        ACSimpleCodec::GetProperty(inPropertyID, ioPropertyDataSize, outPropertyData);
    }
}
// ==================== Example #7 ====================
//	Stores the Speex magic cookie and rebuilds the decoder state from it.
//	Throws kAudioCodecStateError if already initialized, and
//	kAudioCodecUnsupportedFormatError if the cookie fails to parse.
void CASpeexDecoder::SetMagicCookie(const void* inMagicCookieData, UInt32 inMagicCookieDataByteSize)
{
    // NOTE(review): casting `this` to UInt32 truncates the pointer on 64-bit
    // builds — trace output only, but worth confirming.
    dbg_printf(" >> [%08lx] CASpeexDecoder :: SetMagicCookie()\n", (UInt32) this);
    // the cookie may only be set before initialization
    if (mIsInitialized)
        CODEC_THROW(kAudioCodecStateError);

    // keep a copy of the raw cookie bytes
    SetCookie(inMagicCookieData, inMagicCookieDataByteSize);

    // parse the cookie and (re)create the Speex decoder state
    InitializeCompressionSettings();

    // a cookie that failed to parse leaves the decoder unusable
    if (!mCompressionInitialized)
        CODEC_THROW(kAudioCodecUnsupportedFormatError);
    dbg_printf("<.. [%08lx] CASpeexDecoder :: SetMagicCookie()\n", (UInt32) this);
}
// ==================== Example #8 ====================
void CASpeexDecoder::SetCurrentInputFormat(const AudioStreamBasicDescription& inInputFormat)
{
    // the input format is locked once the codec has been initialized
    if (mIsInitialized)
        CODEC_THROW(kAudioCodecStateError);

    // the only input we accept is Xiph Speex
    if (inInputFormat.mFormatID != kAudioFormatXiphSpeex) {
        dbg_printf("CASpeexDecoder::SetFormats: only supports Xiph Speex for input\n");
        CODEC_THROW(kAudioCodecUnsupportedFormatError);
    }

    // hand the validated format to the base class
    XCACodec::SetCurrentInputFormat(inInputFormat);
}
// ==================== Example #9 ====================
//	Initializes the Speex decoder: adopts any supplied formats, verifies that
//	input and output agree on sample rate and channel count (this decoder does
//	no conversion), sets up the BDC buffer, parses the cookie when present,
//	and finally runs the base-class initialization.
void CASpeexDecoder::Initialize(const AudioStreamBasicDescription* inInputFormat,
                                const AudioStreamBasicDescription* inOutputFormat,
                                const void* inMagicCookie, UInt32 inMagicCookieByteSize)
{
    dbg_printf(" >> [%08lx] CASpeexDecoder :: Initialize(%d, %d, %d)\n", (UInt32) this, inInputFormat != NULL, inOutputFormat != NULL, inMagicCookieByteSize != 0);

    // adopt whichever formats the caller supplied
    if(inInputFormat != NULL) {
        SetCurrentInputFormat(*inInputFormat);
    }

    if(inOutputFormat != NULL) {
        SetCurrentOutputFormat(*inOutputFormat);
    }

    // no rate conversion or channel mapping is performed here
    if ((mInputFormat.mSampleRate != mOutputFormat.mSampleRate) ||
        (mInputFormat.mChannelsPerFrame != mOutputFormat.mChannelsPerFrame)) {
        CODEC_THROW(kAudioCodecUnsupportedFormatError);
    }

    // needs to be called after input & output format have been set
    BDCInitialize(kSpeexDecoderInBufferSize);

    //if (inMagicCookieByteSize == 0)
    //    CODEC_THROW(kAudioCodecUnsupportedFormatError);

    // a cookie is optional at this point; it can also arrive later via
    // SetMagicCookie()
    if (inMagicCookieByteSize != 0) {
        SetMagicCookie(inMagicCookie, inMagicCookieByteSize);
    }

    XCACodec::Initialize(inInputFormat, inOutputFormat, inMagicCookie, inMagicCookieByteSize);
    dbg_printf("<.. [%08lx] CASpeexDecoder :: Initialize(%d, %d, %d)\n", (UInt32) this, inInputFormat != NULL, inOutputFormat != NULL, inMagicCookieByteSize != 0);
}
//	Removes inConsumedByteSize bytes from the front of the input ring buffer,
//	zeroing them out as it goes, and advances mInputBufferStart accordingly.
//	Throws kAudioCodecUnspecifiedError when asked to consume more than is
//	currently buffered.
void	ACSimpleCodec::ConsumeInputData(UInt32 inConsumedByteSize)
{
	//	this is a convenience routine to make maintaining the ring buffer state easy
	UInt32 theContiguousRange = GetInputBufferContiguousByteSize();
	
	if(inConsumedByteSize > GetUsedInputBufferByteSize()) CODEC_THROW(kAudioCodecUnspecifiedError);
	
	if(inConsumedByteSize <= theContiguousRange)
	{
		//	the region to consume doesn't wrap
		//	(the original re-clamped inConsumedByteSize to theContiguousRange
		//	here, but within this branch inConsumedByteSize is already known
		//	to be <= theContiguousRange, so that was dead code and is removed)
		
		//	clear the consumed bits
		memset(mInputBuffer + mInputBufferStart, 0, inConsumedByteSize);
		
		//	adjust the start
		mInputBufferStart += inConsumedByteSize;
	}
	else
	{
		//	the region to consume will wrap
		
		//	clear the bits to the end of the buffer
		memset(mInputBuffer + mInputBufferStart, 0, theContiguousRange);
		
		//	now clear the bits left from the start
		memset(mInputBuffer, 0, inConsumedByteSize - theContiguousRange);
		
		//	adjust the start
		mInputBufferStart = inConsumedByteSize - theContiguousRange;
	}
}
// ==================== Example #11 ====================
//	Base-class implementation: codecs that actually use a magic cookie
//	override this. The base class only enforces that the cookie cannot be
//	changed once the codec is initialized.
//	(Parameter renamed from the misleading `outMagicCookieData` — it is an
//	input, matching the other SetMagicCookie overrides in this file.)
void	ACBaseCodec::SetMagicCookie(const void* inMagicCookieData, UInt32 inMagicCookieDataByteSize)
{
	(void)inMagicCookieData;		// unused in the base implementation
	(void)inMagicCookieDataByteSize;	// unused in the base implementation
	if(mIsInitialized)
	{
		CODEC_THROW(kAudioCodecStateError);
	}
}
// ==================== Example #12 ====================
//	Initializes the A/52 codec: adopts any supplied formats, checks that
//	input and output agree on sample rate, then defers to the base class.
void	ACShepA52Codec::Initialize(const AudioStreamBasicDescription* inInputFormat,
                                   const AudioStreamBasicDescription* inOutputFormat,
                                   const void* inMagicCookie, UInt32 inMagicCookieByteSize) {

    //	adopt whichever formats the caller supplied
    if(inInputFormat != NULL) {
        SetCurrentInputFormat(*inInputFormat);
    }
    if(inOutputFormat != NULL) {
        SetCurrentOutputFormat(*inOutputFormat);
    }

    //	this codec performs no sample rate conversion, so the rates must agree
    if(mInputFormat.mSampleRate != mOutputFormat.mSampleRate) {
        CODEC_THROW(kAudioCodecUnsupportedFormatError);
    }

    ACSimpleCodec::Initialize(inInputFormat, inOutputFormat, inMagicCookie, inMagicCookieByteSize);
}
//	Initializes the FLAC codec: adopts any supplied formats, checks that
//	input and output agree on sample rate and channel count, parses the
//	cookie when present, then defers to the base class.
void	ACFLACCodec::Initialize(const AudioStreamBasicDescription* inInputFormat, const AudioStreamBasicDescription* inOutputFormat, const void* inMagicCookie, UInt32 inMagicCookieByteSize)
{
	//	adopt whichever formats the caller supplied
	if(inInputFormat != NULL)
	{
		SetCurrentInputFormat(*inInputFormat);
	}
	if(inOutputFormat != NULL)
	{
		SetCurrentOutputFormat(*inOutputFormat);
	}

	//	FLAC does no rate conversion or channel mapping, so input and output
	//	must agree on both counts
	bool theRatesMatch = (mInputFormat.mSampleRate == mOutputFormat.mSampleRate);
	bool theChannelsMatch = (mInputFormat.mChannelsPerFrame == mOutputFormat.mChannelsPerFrame);
	if(!theRatesMatch || !theChannelsMatch)
	{
#if VERBOSE	
		printf("The channels and sample rates don't match, mInputFormat.mSampleRate == %f, mOutputFormat.mSampleRate == %f, mInputFormat.mChannelsPerFrame == %lu, mOutputFormat.mChannelsPerFrame == %lu\n", 
				mInputFormat.mSampleRate, mOutputFormat.mSampleRate, mInputFormat.mChannelsPerFrame, mOutputFormat.mChannelsPerFrame);
#endif
		CODEC_THROW(kAudioCodecUnsupportedFormatError);
	}

	//	parse the cookie, when one was provided
	if(inMagicCookie != NULL)
	{
		SetMagicCookie(inMagicCookie, inMagicCookieByteSize);
	}

	ACBaseCodec::Initialize(inInputFormat, inOutputFormat, inMagicCookie, inMagicCookieByteSize);
}
// ==================== Example #14 ====================
//	Adopts a new output format. The format is locked once the codec has
//	been initialized.
void	ACBaseCodec::SetCurrentOutputFormat(const AudioStreamBasicDescription& inOutputFormat)
{
	if(mIsInitialized)
	{
		CODEC_THROW(kAudioCodecStateError);
	}
	mOutputFormat = inOutputFormat;
}
// ==================== Example #15 ====================
//	Adopts a new output format. Only linear PCM is accepted, either as
//	32-bit native-endian packed floats or 16-bit native-endian packed
//	signed integers. The format is locked once the codec is initialized.
void CASpeexDecoder::SetCurrentOutputFormat(const AudioStreamBasicDescription& inOutputFormat)
{
    if (mIsInitialized)
    {
        CODEC_THROW(kAudioCodecStateError);
    }

    //	name the pieces of the legality check for readability
    bool isLinearPCM = (inOutputFormat.mFormatID == kAudioFormatLinearPCM);
    bool isFloat32 = (inOutputFormat.mFormatFlags == kAudioFormatFlagsNativeFloatPacked) &&
                     (inOutputFormat.mBitsPerChannel == 32);
    bool isInt16 = (inOutputFormat.mFormatFlags == (kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked)) &&
                   (inOutputFormat.mBitsPerChannel == 16);

    if (!isLinearPCM || !(isFloat32 || isInt16))
    {
        dbg_printf("CASpeexDecoder::SetFormats: only supports"
                   " either 16 bit native endian signed integer or 32 bit native endian CoreAudio floats for output\n");
        CODEC_THROW(kAudioCodecUnsupportedFormatError);
    }

    //	tell our base class about the new format
    XCACodec::SetCurrentOutputFormat(inOutputFormat);
}
//	Initializes the simple codec: sizes the input ring buffer, rejects a
//	zero bytes-per-packet input format (which would divide by zero in
//	AppendInputData), and defers to the base class.
//	NOTE(review): the check reads mInputFormat before the base class has seen
//	inInputFormat — presumably the format was already set via
//	SetCurrentInputFormat; confirm the ordering with callers.
void	ACSimpleCodec::Initialize(const AudioStreamBasicDescription* inInputFormat, const AudioStreamBasicDescription* inOutputFormat, const void* inMagicCookie, UInt32 inMagicCookieByteSize)
{
	//	(re)size the ring buffer back to its nominal, unpadded size
	ReallocateInputBuffer(mInputBufferByteSize - kBufferPad);

	// By definition CBR has this greater than 0. We must avoid a div by 0 error in AppendInputData()
	// Note this will cause us to fail initialization which is intended
	if (mInputFormat.mBytesPerPacket == 0)
	{
		CODEC_THROW(kAudioCodecUnsupportedFormatError);
	}	
	
	ACBaseCodec::Initialize(inInputFormat, inOutputFormat, inMagicCookie, inMagicCookieByteSize);
}
// ==================== Example #17 ====================
//	Accepts one Ogg page of input, extracts the FLAC packets it contains,
//	and forwards each to the underlying FLAC decoder. Bookkeeping for the
//	page's packet count is pushed onto mFramesBufferedList.
void CAOggFLACDecoder::InPacket(const void* inInputData, const AudioStreamPacketDescription* inPacketDescription)
{
    //	the cookie must have been parsed before any data can be accepted
    if (!mCompressionInitialized)
        CODEC_THROW(kAudioCodecUnspecifiedError);

    ogg_page op;

    //	wrap the raw bytes in an ogg_page structure (no copy)
    if (!WrapOggPage(&op, inInputData, inPacketDescription->mDataByteSize + inPacketDescription->mStartOffset, inPacketDescription->mStartOffset))
        CODEC_THROW(kAudioCodecUnspecifiedError);

    dbg_printf("[ oFD]   : [%08lx] InPacket() [%4.4s] %ld\n", (UInt32) this, (char *) (static_cast<const Byte*> (inInputData) + inPacketDescription->mStartOffset),
               ogg_page_pageno(&op));

    ogg_packet opk;
    SInt32 packet_count = 0;
    int oret;
    AudioStreamPacketDescription flac_packet_desc = {0, 0, 0};
    UInt32 page_packets = ogg_page_packets(&op);

    //	feed the page into the stream state and drain the complete packets
    ogg_stream_pagein(&mO_st, &op);
    while ((oret = ogg_stream_packetout(&mO_st, &opk)) != 0) {
        if (oret < 0) {
            //	a hole/desync in the stream: drop this packet from the count
            page_packets--;
            continue;
        }

        packet_count++;

        flac_packet_desc.mDataByteSize = opk.bytes;

        //	hand the bare FLAC packet to the wrapped decoder
        CAFLACDecoder::InPacket(opk.packet, &flac_packet_desc);
    }

    if (packet_count > 0)
        complete_pages += 1;

    //	remember how many packets (and frames) this page contributed
    mFramesBufferedList.push_back(OggPagePacket(packet_count, inPacketDescription->mVariableFramesInPacket));
}
//	Writes a codec property. The sample-rate ranges, zero-padding count and
//	prime info are read-only on the encoder; everything else is delegated
//	to the base class.
void ACAppleIMA4Encoder::SetProperty(AudioCodecPropertyID inPropertyID, UInt32 inPropertyDataSize, const void* inPropertyData)
{
	if((inPropertyID == kAudioCodecPropertyAvailableInputSampleRates) ||
	   (inPropertyID == kAudioCodecPropertyAvailableOutputSampleRates) ||
	   (inPropertyID == kAudioCodecPropertyZeroFramesPadded) ||
	   (inPropertyID == kAudioCodecPropertyPrimeInfo))
	{
		CODEC_THROW(kAudioCodecIllegalOperationError);
	}
	else
	{
		ACAppleIMA4Codec::SetProperty(inPropertyID, inPropertyDataSize, inPropertyData);
	}
}
//	Writes a codec property. The only property handled at this level is the
//	input buffer size; everything else is delegated to the base class.
void	ACSimpleCodec::SetProperty(AudioCodecPropertyID inPropertyID, UInt32 inPropertyDataSize, const void* inPropertyData)
{
	if(inPropertyID == kAudioCodecPropertyInputBufferSize)
	{
		if(inPropertyDataSize != sizeof(UInt32))
		{
			CODEC_THROW(kAudioCodecBadPropertySizeError);
		}
		ReallocateInputBuffer(*reinterpret_cast<const UInt32*>(inPropertyData));
	}
	else
	{
		ACBaseCodec::SetProperty(inPropertyID, inPropertyDataSize, inPropertyData);
	}
}
//	Appends raw bytes (starting at inOffset into inInputData) to the input
//	ring buffer, clamped to the space available. On return
//	ioInputDataByteSize reports how many bytes were actually copied.
//	Unlike AppendInputData, this variant does no packet accounting.
void	ACSimpleCodec::AppendInputBuffer(const void* inInputData, UInt32 inOffset, UInt32& ioInputDataByteSize)
{
	//	this buffer handling code doesn't care about such things as the packet descriptions
	if(!mIsInitialized) CODEC_THROW(kAudioCodecStateError);
	
	//	this is a ring buffer we're dealing with, so we need to set up a few things
	UInt32 theUsedByteSize = GetUsedInputBufferByteSize();
	UInt32 theAvailableByteSize = GetInputBufferByteSize() - theUsedByteSize;
	
	const Byte* theInputData = static_cast<const Byte*>(inInputData) + inOffset;
	
	//	clamp to the free space in the ring buffer
	if(ioInputDataByteSize > theAvailableByteSize) {
		ioInputDataByteSize = theAvailableByteSize;
	}
	
	//	now we have to copy the data taking into account the wrap around and where the start is
	if(mInputBufferEnd + ioInputDataByteSize < mInputBufferByteSize)
	{
		//	no wrap around here
		memcpy(mInputBuffer + mInputBufferEnd, theInputData, ioInputDataByteSize);
		
		//	adjust the end point
		mInputBufferEnd += ioInputDataByteSize;
	}
	else
	{
		//	the copy will wrap
		
		//	copy the first part (up to the physical end of the buffer)
		UInt32 theBeforeWrapByteSize = mInputBufferByteSize - mInputBufferEnd;
		memcpy(mInputBuffer + mInputBufferEnd, theInputData, theBeforeWrapByteSize);
		
		//	and the rest (wrapped to the front; may be 0 bytes when the copy
		//	lands exactly on the buffer boundary)
		UInt32 theAfterWrapByteSize = ioInputDataByteSize - theBeforeWrapByteSize;
		memcpy(mInputBuffer, theInputData + theBeforeWrapByteSize, theAfterWrapByteSize);
		
		//	adjust the end point
		mInputBufferEnd = theAfterWrapByteSize;
	}
}
//	Returns a pointer to up to ioNumberBytes of contiguous buffered input,
//	clamping ioNumberBytes to what is actually buffered. When the requested
//	range wraps, the head of the buffer is copied into the over-allocated
//	tail region so the caller always sees contiguous bytes.
Byte* ACSimpleCodec::GetBytes(UInt32& ioNumberBytes) const
{
	// if a client's algorithm has to have contiguous data and mInputBuffer wraps, then someone has to make a copy.
	// I can do it more efficiently than the client. 
	
	if(!mIsInitialized) CODEC_THROW(kAudioCodecStateError);

	UInt32 theUsedByteSize = GetUsedInputBufferByteSize();
	//UInt32 theAvailableByteSize = GetInputBufferByteSize() - theUsedByteSize;
	
	//	can't hand out more than is buffered
	if (ioNumberBytes > theUsedByteSize) ioNumberBytes = theUsedByteSize;
		
	//	how many bytes of the request lie past the physical end of the buffer
	//	NOTE(review): this computes a signed value from unsigned arithmetic —
	//	presumably the buffer sizes are small enough that it cannot overflow;
	//	confirm against the allocation in ReallocateInputBuffer.
	SInt32 leftOver = mInputBufferStart + ioNumberBytes - mInputBufferByteSize;
	
	if(leftOver > 0)
	{
		// need to copy beginning of buffer to the end. 
		// We cleverly over allocated our buffer space to make this possible.
		memmove(mInputBuffer + mInputBufferByteSize, mInputBuffer, leftOver);
	}
	
	return GetInputBufferStart();
}
// ==================== Example #22 ====================
//	Reports the size and writability of every property supported by the base
//	class. Cases with identical bodies are grouped via fall-through; the
//	reported sizes and writability are unchanged from the original.
//	Throws kAudioCodecUnknownPropertyError for unrecognized properties.
void	ACBaseCodec::GetPropertyInfo(AudioCodecPropertyID inPropertyID, UInt32& outPropertyDataSize, Boolean& outWritable)
{
	switch(inPropertyID)
	{
		//	CFString-valued, read-only
		case kAudioCodecPropertyNameCFString:
		case kAudioCodecPropertyManufacturerCFString:
		case kAudioCodecPropertyFormatCFString:
			outPropertyDataSize = SizeOf32(CFStringRef);
			outWritable = false;
			break;

		//	UInt32-valued, read-only
		case kAudioCodecPropertyRequiresPacketDescription:
		case kAudioCodecPropertyMinimumNumberInputPackets:
		case kAudioCodecPropertyMinimumNumberOutputPackets:
		case kAudioCodecPropertyInputBufferSize:
		case kAudioCodecPropertyUsedInputBufferSize:
		case kAudioCodecPropertyIsInitialized:
		case kAudioCodecPropertyPrimeMethod:
		case kAudioCodecPropertyDoesSampleRateConversion:
			outPropertyDataSize = SizeOf32(UInt32);
			outWritable = false;
			break;

		//	the current formats are the only writable properties here
		case kAudioCodecPropertyCurrentInputFormat:
		case kAudioCodecPropertyCurrentOutputFormat:
			outPropertyDataSize = SizeOf32(AudioStreamBasicDescription);
			outWritable = true;
			break;

		case kAudioCodecPropertySupportedInputFormats:
		case kAudioCodecPropertyInputFormatsForOutputFormat:
			outPropertyDataSize = GetNumberSupportedInputFormats() * SizeOf32(AudioStreamBasicDescription);
			outWritable = false;
			break;

		case kAudioCodecPropertySupportedOutputFormats:
		case kAudioCodecPropertyOutputFormatsForInputFormat:
			outPropertyDataSize = GetNumberSupportedOutputFormats() * SizeOf32(AudioStreamBasicDescription);
			outWritable = false;
			break;

		case kAudioCodecPropertyMagicCookie:
			outPropertyDataSize = GetMagicCookieByteSize();
			outWritable = true;
			break;

		case kAudioCodecPropertyAvailableNumberChannels:
			outPropertyDataSize = SizeOf32(UInt32) * 2; // Mono, stereo
			outWritable = false;
			break;

		case kAudioCodecPropertyPrimeInfo:
			outPropertyDataSize = SizeOf32(AudioCodecPrimeInfo);
			outWritable = false;
			break;

		default:
			CODEC_THROW(kAudioCodecUnknownPropertyError);
			break;
	};
}
// ==================== Example #23 ====================
//	Parses the stored magic cookie (an OggSerialNo atom followed by a Speex
//	header atom), fills mSpeexHeader from it, and (re)creates the Speex
//	decoder state. On success mCompressionInitialized becomes true; on any
//	structural problem with the cookie the function returns quietly with
//	mCompressionInitialized == false.
void CASpeexDecoder::InitializeCompressionSettings()
{
    //	nothing to do until a cookie has been stored
    if (mCookie == NULL)
        return;

    //	if we were already initialized, tear down the previous decoder state
    if (mCompressionInitialized) {
        memset(&mSpeexHeader, 0, sizeof(mSpeexHeader));

        mSpeexStereoState.balance = 1.0;
        mSpeexStereoState.e_ratio = 0.5;
        mSpeexStereoState.smooth_left = 1.0;
        mSpeexStereoState.smooth_right = 1.0;

        if (mSpeexDecoderState != NULL) {
            speex_decoder_destroy(mSpeexDecoderState);
            mSpeexDecoderState = NULL;
        }
    }

    mCompressionInitialized = false;

    //	the cookie layout: [OggSerialNo atom][Speex header atom][...]
    OggSerialNoAtom *atom = reinterpret_cast<OggSerialNoAtom*> (mCookie);
    Byte *ptrheader = mCookie + EndianU32_BtoN(atom->size);
    CookieAtomHeader *aheader = reinterpret_cast<CookieAtomHeader*> (ptrheader);

    // scan quickly through the cookie, check types and packet sizes
    if (EndianS32_BtoN(atom->type) != kCookieTypeOggSerialNo || static_cast<UInt32> (ptrheader - mCookie) > mCookieSize)
        return;
    ptrheader += EndianU32_BtoN(aheader->size);
    if (EndianS32_BtoN(aheader->type) != kCookieTypeSpeexHeader || static_cast<UInt32> (ptrheader - mCookie) > mCookieSize)
        return;
    // we ignore the rest: comments and extra headers

    // all OK, back to the first speex packet
    aheader = reinterpret_cast<CookieAtomHeader*> (mCookie + EndianU32_BtoN(atom->size));
    SpeexHeader *inheader = reinterpret_cast<SpeexHeader *> (&aheader->data[0]);

    // TODO: convert, at some point, mSpeexHeader to a pointer?
    //	the on-disk header is little-endian; swap each field to native order
    mSpeexHeader.bitrate =                 EndianS32_LtoN(inheader->bitrate);
    mSpeexHeader.extra_headers =           EndianS32_LtoN(inheader->extra_headers);
    mSpeexHeader.frame_size =              EndianS32_LtoN(inheader->frame_size);
    mSpeexHeader.frames_per_packet =       EndianS32_LtoN(inheader->frames_per_packet);
    mSpeexHeader.header_size =             EndianS32_LtoN(inheader->header_size);
    mSpeexHeader.mode =                    EndianS32_LtoN(inheader->mode);
    mSpeexHeader.mode_bitstream_version =  EndianS32_LtoN(inheader->mode_bitstream_version);
    mSpeexHeader.nb_channels =             EndianS32_LtoN(inheader->nb_channels);
    mSpeexHeader.rate =                    EndianS32_LtoN(inheader->rate);
    mSpeexHeader.reserved1 =               EndianS32_LtoN(inheader->reserved1);
    mSpeexHeader.reserved2 =               EndianS32_LtoN(inheader->reserved2);
    mSpeexHeader.speex_version_id =        EndianS32_LtoN(inheader->speex_version_id);
    mSpeexHeader.vbr =                     EndianS32_LtoN(inheader->vbr);

    //	reject modes this libspeex build does not know about
    if (mSpeexHeader.mode >= SPEEX_NB_MODES)
        CODEC_THROW(kAudioCodecUnsupportedFormatError);

    //TODO: check bitstream version here

    mSpeexDecoderState = speex_decoder_init(speex_lib_get_mode(mSpeexHeader.mode));

    if (!mSpeexDecoderState)
        CODEC_THROW(kAudioCodecUnsupportedFormatError);

    //TODO: fix some of the header fields here

    //	disable the perceptual enhancer
    int enhzero = 0;
    speex_decoder_ctl(mSpeexDecoderState, SPEEX_SET_ENH, &enhzero);

    //	stereo streams signal channel layout in-band; install the handler
    if (mSpeexHeader.nb_channels == 2)
    {
        SpeexCallback callback;
        callback.callback_id = SPEEX_INBAND_STEREO;
        callback.func = speex_std_stereo_request_handler;
        callback.data = &mSpeexStereoState;
        speex_decoder_ctl(mSpeexDecoderState, SPEEX_SET_HANDLER, &callback);
    }

    mCompressionInitialized = true;
}
// ==================== Example #24 ====================
//	Writes a base-class property. Nothing may be written once the codec is
//	initialized. The current formats and the magic cookie are the only
//	writable properties; the remaining known properties throw
//	kAudioCodecIllegalOperationError and unknown ones throw
//	kAudioCodecUnknownPropertyError.
void	ACBaseCodec::SetProperty(AudioCodecPropertyID inPropertyID, UInt32 inPropertyDataSize, const void* inPropertyData)
{
	// No property can be set when the codec is initialized
	if(mIsInitialized)
	{
		CODEC_THROW(kAudioCodecIllegalOperationError);
	}

	switch(inPropertyID)
	{
		case kAudioCodecPropertyCurrentInputFormat:
			if(inPropertyDataSize != SizeOf32(AudioStreamBasicDescription))
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			SetCurrentInputFormat(*reinterpret_cast<const AudioStreamBasicDescription*>(inPropertyData));
			break;

		case kAudioCodecPropertyCurrentOutputFormat:
			if(inPropertyDataSize != SizeOf32(AudioStreamBasicDescription))
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			SetCurrentOutputFormat(*reinterpret_cast<const AudioStreamBasicDescription*>(inPropertyData));
			break;

		case kAudioCodecPropertyMagicCookie:
			SetMagicCookie(inPropertyData, inPropertyDataSize);
			break;

		//	known, but never writable through this entry point
		case kAudioCodecPropertyMinimumNumberOutputPackets :
		case kAudioCodecPropertyMinimumNumberInputPackets :
		case kAudioCodecPropertyInputBufferSize:
		case kAudioCodecPropertyNameCFString:
		case kAudioCodecPropertyManufacturerCFString:
		case kAudioCodecPropertyFormatCFString:
		case kAudioCodecPropertySupportedInputFormats:
		case kAudioCodecPropertySupportedOutputFormats:
		case kAudioCodecPropertyUsedInputBufferSize:
		case kAudioCodecPropertyIsInitialized:
		case kAudioCodecPropertyAvailableNumberChannels:
		case kAudioCodecPropertyPrimeMethod:
		case kAudioCodecPropertyPrimeInfo:
		case kAudioCodecPropertyOutputFormatsForInputFormat:
		case kAudioCodecPropertyInputFormatsForOutputFormat:
		case kAudioCodecPropertyDoesSampleRateConversion:
		case kAudioCodecPropertyRequiresPacketDescription:
			CODEC_THROW(kAudioCodecIllegalOperationError);
			break;

		default:
			CODEC_THROW(kAudioCodecUnknownPropertyError);
			break;
	};
}
//	Reads the FLAC codec's properties; anything not handled here goes to the
//	base class. Fixed-size properties validate ioPropertyDataSize and throw
//	kAudioCodecBadPropertySizeError on mismatch.
void	ACFLACCodec::GetProperty(AudioCodecPropertyID inPropertyID, UInt32& ioPropertyDataSize, void* outPropertyData)
{	
	// kAudioCodecPropertyMaximumPacketByteSize is handled in the Encoder or Decoder
	
	switch(inPropertyID)
	{
		case kAudioCodecPropertyFormatCFString:
		{
			if (ioPropertyDataSize != sizeof(CFStringRef))
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			
			//	caller takes ownership of the returned CFString
			CABundleLocker lock;
			CFStringRef name = CFCopyLocalizedStringFromTableInBundle(CFSTR("FLAC"), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
			*(CFStringRef*)outPropertyData = name;
			break; 
		}

       case kAudioCodecPropertyRequiresPacketDescription:
  			if(ioPropertyDataSize == sizeof(UInt32))
			{
                *reinterpret_cast<UInt32*>(outPropertyData) = 1; 
            }
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
            break;
			
        case kAudioCodecPropertyHasVariablePacketByteSizes:
  			if(ioPropertyDataSize == sizeof(UInt32))
			{
                *reinterpret_cast<UInt32*>(outPropertyData) = 1; // We are variable bitrate
            }
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
            break;
			
		case kAudioCodecPropertyPacketFrameSize:
			if(ioPropertyDataSize == sizeof(UInt32))
			{
                *reinterpret_cast<UInt32*>(outPropertyData) = kFramesPerPacket;
            }
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyMagicCookie:
			//	NOTE(review): a getter mutating mMagicCookieLength is unusual —
			//	presumably GetMagicCookie updates ioPropertyDataSize to the
			//	actual size written; confirm against its implementation.
			if(ioPropertyDataSize >= GetMagicCookieByteSize())
			{
				GetMagicCookie(outPropertyData, ioPropertyDataSize);
				mMagicCookieLength = ioPropertyDataSize;
			}
			else
			{
				CODEC_THROW(kAudioCodecIllegalOperationError);
			}
			break;
			
        case kAudioCodecPropertyCurrentInputSampleRate:
  			if(ioPropertyDataSize == sizeof(Float64))
			{
                *reinterpret_cast<Float64*>(outPropertyData) = (Float64)(mInputFormat.mSampleRate);
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
            break;
			
		case kAudioCodecPropertyCurrentOutputSampleRate:
  			if(ioPropertyDataSize == sizeof(Float64))
			{
				*reinterpret_cast<Float64*>(outPropertyData) = (Float64)(mOutputFormat.mSampleRate);
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyInputChannelLayout:
		case kAudioCodecPropertyOutputChannelLayout:
			//	input and output channel counts are equal (enforced in
			//	Initialize), so the input count is used for both layouts
			AudioChannelLayout temp1AudioChannelLayout;
			memset(&temp1AudioChannelLayout, 0, sizeof(AudioChannelLayout));
  			if(ioPropertyDataSize == sizeof(AudioChannelLayout))
			{
				temp1AudioChannelLayout.mChannelLayoutTag = sChannelLayoutTags[mInputFormat.mChannelsPerFrame - 1];
				memcpy(outPropertyData, &temp1AudioChannelLayout, ioPropertyDataSize);
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyAvailableInputChannelLayouts:
		case kAudioCodecPropertyAvailableOutputChannelLayouts:
  			if(ioPropertyDataSize == kMaxChannels * sizeof(AudioChannelLayoutTag))
			{
				if(mIsInitialized)
				{
					//	once initialized, only the current layout is available;
					//	ioPropertyDataSize shrinks to a single tag
					AudioChannelLayoutTag temp2AudioChannelLayout[1];
					temp2AudioChannelLayout[0] = sChannelLayoutTags[mInputFormat.mChannelsPerFrame - 1];
					ioPropertyDataSize = sizeof(AudioChannelLayoutTag);
					memcpy(reinterpret_cast<AudioChannelLayoutTag*>(outPropertyData), temp2AudioChannelLayout, ioPropertyDataSize);
				}
				else
				{
					//	uninitialized: report the full mono..7.1 layout menu
					AudioChannelLayoutTag tempAudioChannelLayout[kMaxChannels];
					tempAudioChannelLayout[0] = kAudioChannelLayoutTag_Mono;
					tempAudioChannelLayout[1] = kAudioChannelLayoutTag_Stereo;
					tempAudioChannelLayout[2] = kAudioChannelLayoutTag_MPEG_3_0_B;
					tempAudioChannelLayout[3] = kAudioChannelLayoutTag_MPEG_4_0_B;
					tempAudioChannelLayout[4] = kAudioChannelLayoutTag_MPEG_5_0_D;
					tempAudioChannelLayout[5] = kAudioChannelLayoutTag_MPEG_5_1_D;
					tempAudioChannelLayout[6] = kAudioChannelLayoutTag_AAC_6_1;
					tempAudioChannelLayout[7] = kAudioChannelLayoutTag_MPEG_7_1_B;
					memcpy(reinterpret_cast<AudioChannelLayoutTag*>(outPropertyData), tempAudioChannelLayout, ioPropertyDataSize);
				}
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyFormatInfo:
			if(ioPropertyDataSize == sizeof(AudioFormatInfo))
			{
				AudioFormatInfo& formatInfo = *(AudioFormatInfo*)outPropertyData;
				
				// Check for cookie existence
				if((NULL != formatInfo.mMagicCookie) && (formatInfo.mMagicCookieSize > 0))
				{
					UInt32 theByteSize = formatInfo.mMagicCookieSize;
					
					//	derive the ASBD fields from the cookie's STREAMINFO
					FLAC__StreamMetadata_StreamInfo theConfig;
					memset (&theConfig, 0, sizeof(FLAC__StreamMetadata_StreamInfo));
					ParseMagicCookie(formatInfo.mMagicCookie, theByteSize, &theConfig);
					formatInfo.mASBD.mSampleRate = (Float64)theConfig.sample_rate;
					formatInfo.mASBD.mChannelsPerFrame = theConfig.channels;
					formatInfo.mASBD.mFramesPerPacket = theConfig.max_blocksize;
					formatInfo.mASBD.mBytesPerPacket = 0; // it's never CBR
					//	map the source bit depth onto the FLAC format flags
					switch (theConfig.bits_per_sample)
					{
						case 16:
							formatInfo.mASBD.mFormatFlags = kFLACFormatFlag_16BitSourceData;
							break;
						case 20:
							formatInfo.mASBD.mFormatFlags = kFLACFormatFlag_20BitSourceData;
							break;
						case 24:
							formatInfo.mASBD.mFormatFlags = kFLACFormatFlag_24BitSourceData;
							break;
						case 32:
							formatInfo.mASBD.mFormatFlags = kFLACFormatFlag_32BitSourceData;
							break;
						default: // we don't support this
							formatInfo.mASBD.mFormatFlags = 0;
							break;						
					}
				}
				else
				{
					// We don't have a cookie, we have to check the ASBD 
					// according to the input formats
					UInt32 i;
					for(i = 0; i < GetNumberSupportedInputFormats(); ++i)
					{
						if(mInputFormatList[i].IsEqual(formatInfo.mASBD))
						{
							// IsEqual will treat 0 values as wildcards -- we can't have that with the format flags
							UInt32 tempFormatFlags = formatInfo.mASBD.mFormatFlags;
							// Fill out missing entries
							CAStreamBasicDescription::FillOutFormat(formatInfo.mASBD, mInputFormatList[i]);
							if (tempFormatFlags == 0)
							{
								formatInfo.mASBD.mFormatFlags = 0; // anything assigned here would be bad.
							}
							break;
						}
					}
					if(i == GetNumberSupportedInputFormats())
					{
						// No suitable settings found
						CODEC_THROW(kAudioCodecUnsupportedFormatError);						
					}
				}
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		default:
			ACBaseCodec::GetProperty(inPropertyID, ioPropertyDataSize, outPropertyData);
	}
}
void	ACAppleIMA4Encoder::GetProperty(AudioCodecPropertyID inPropertyID, UInt32& ioPropertyDataSize, void* outPropertyData)
{	
	// Answers encoder-specific property queries; anything not handled here is
	// forwarded to ACAppleIMA4Codec::GetProperty (default case). Every case
	// validates the caller-supplied buffer size first and throws
	// kAudioCodecBadPropertySizeError on a mismatch.
	switch(inPropertyID)
	{
#if !BUILD_ADEC_LIB
		case kAudioCodecPropertyNameCFString:
		{
			if (ioPropertyDataSize != sizeof(CFStringRef))
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			CABundleLocker lock;
			// Localized display name; caller receives the retained CFString
			// and is responsible for releasing it.
			CFStringRef name = CFCopyLocalizedStringFromTableInBundle(CFSTR("Acme IMA4 encoder"), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
			*(CFStringRef*)outPropertyData = name;
			break; 
		}
#endif
		case kAudioCodecPropertyAvailableNumberChannels:
			// Copies the whole table of supported channel counts; the caller
			// must supply exactly kMaxIMA4Channels UInt32 slots.
  			if(ioPropertyDataSize == sizeof(UInt32) * kMaxIMA4Channels)
			{
				memcpy(reinterpret_cast<UInt32*>(outPropertyData), mSupportedChannelTotals, ioPropertyDataSize);
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
		case kAudioCodecPropertyAvailableInputSampleRates:
			// Returns a 0.0 .. 0.0 range — presumably the AudioCodec
			// convention for "any rate" (the encoder does no rate
			// conversion); TODO confirm against the AudioCodec docs.
  			if(ioPropertyDataSize == sizeof(AudioValueRange) )
			{
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMinimum = 0.0;
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMaximum = 0.0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
		case kAudioCodecPropertyAvailableOutputSampleRates:
			// Same 0.0 .. 0.0 range as the input rates (output rate mirrors
			// the input rate).
  			if(ioPropertyDataSize == sizeof(AudioValueRange) )
			{
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMinimum = 0.0;
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMaximum = 0.0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
		case kAudioCodecPropertyPrimeInfo:
			// No leading priming; trailing frames are however many zeroes the
			// encoder padded to fill out the last packet.
  			if(ioPropertyDataSize == sizeof(AudioCodecPrimeInfo) )
			{
				(reinterpret_cast<AudioCodecPrimeInfo*>(outPropertyData))->leadingFrames = 0;
				(reinterpret_cast<AudioCodecPrimeInfo*>(outPropertyData))->trailingFrames = mZeroesPadded;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
		case kAudioCodecPropertyZeroFramesPadded:
			// Raw count of zero frames appended by the encoder.
			if(ioPropertyDataSize == sizeof(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = mZeroesPadded;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
		default:
			ACAppleIMA4Codec::GetProperty(inPropertyID, ioPropertyDataSize, outPropertyData);
	}
}
//FLAC__StreamMetadata_StreamInfo
void ACFLACCodec::GetMagicCookie(void* outMagicCookieData, UInt32& ioMagicCookieDataByteSize) const
{

	Byte *						buffer;
	Byte *						currPtr;
	AudioFormatAtom * frmaAtom;
	FullAtomHeader * flacAtom;
	AudioTerminatorAtom * termAtom;
	SInt32						atomSize;
	UInt32						flacSize;
	UInt32						chanSize;
	UInt32						frmaSize;
	UInt32						termSize;
	FLAC__StreamMetadata_StreamInfo *		config;
	OSStatus					status;
	UInt32						tempMaxFrameBytes;
	
	//RequireAction( sampleDesc != nil, return paramErr; );

	config		= nil;
	
	frmaSize = sizeof(AudioFormatAtom);
	flacSize		= sizeof(FullAtomHeader) + sizeof(FLAC__StreamMetadata_StreamInfo);
	chanSize		= 0;
	termSize = sizeof(AudioTerminatorAtom);

	// if we're encoding more than two channels, add an AudioChannelLayout atom to describe the layout
	if ( mOutputFormat.mChannelsPerFrame > 2 )
	{
		chanSize = sizeof(FullAtomHeader) + offsetof(AudioChannelLayout, mChannelDescriptions);
	}

	// create buffer of the required size
	atomSize = frmaSize + flacSize + chanSize + termSize;
	
	// Someone might have a stereo/mono cookie while we're trying to do surround.
	if ((UInt32)atomSize > ioMagicCookieDataByteSize)
	{
		CODEC_THROW(kAudioCodecBadPropertySizeError);
	}
	
	tempMaxFrameBytes = kInputBufferPackets * mOutputFormat.mChannelsPerFrame * ((10 + kMaxSampleSize) / 8) + 1;

	buffer = (Byte *)calloc( atomSize, 1 );
	currPtr = buffer;

	// fill in the atom stuff
	frmaAtom = (AudioFormatAtom *) currPtr;
	frmaAtom->size			= EndianU32_NtoB( frmaSize );
	frmaAtom->atomType		= EndianU32_NtoB( kAudioFormatAtomType );
	frmaAtom->format	= EndianU32_NtoB( 'flac' );
	currPtr += frmaSize;

	// fill in the FLAC config
	flacAtom = (FullAtomHeader *) currPtr;
	flacAtom->size				= EndianU32_NtoB( flacSize );
	flacAtom->type				= EndianU32_NtoB( 'flac' );
	flacAtom->versionFlags		= 0;
	currPtr += sizeof(FullAtomHeader);

/*
	unsigned min_blocksize, max_blocksize;
	unsigned min_framesize, max_framesize;
	unsigned sample_rate;
	unsigned channels;
	unsigned bits_per_sample;
	FLAC__uint64 total_samples;
	FLAC__byte md5sum[16];
*/
	config = (FLAC__StreamMetadata_StreamInfo *) currPtr;
	if (mCookieDefined)
	{
		config->min_blocksize	= EndianU32_NtoB( mStreamInfo.min_blocksize );
		config->max_blocksize	= EndianU32_NtoB( mStreamInfo.max_blocksize );
		config->min_framesize	= EndianU32_NtoB( mStreamInfo.min_framesize );
		config->max_framesize	= EndianU32_NtoB( mStreamInfo.max_framesize );
		config->sample_rate		= EndianU32_NtoB( mStreamInfo.sample_rate );
		config->channels		= EndianU32_NtoB( mStreamInfo.channels );
		config->bits_per_sample	= EndianU32_NtoB( mStreamInfo.bits_per_sample );
		config->total_samples	= EndianU64_NtoB( mStreamInfo.total_samples );
		config->md5sum[0]		= mStreamInfo.md5sum[0];
		config->md5sum[1]		= mStreamInfo.md5sum[1];
		config->md5sum[2]		= mStreamInfo.md5sum[2];
		config->md5sum[3]		= mStreamInfo.md5sum[3];
		config->md5sum[4]		= mStreamInfo.md5sum[4];
		config->md5sum[5]		= mStreamInfo.md5sum[5];
		config->md5sum[6]		= mStreamInfo.md5sum[6];
		config->md5sum[7]		= mStreamInfo.md5sum[7];
		config->md5sum[8]		= mStreamInfo.md5sum[8];
		config->md5sum[9]		= mStreamInfo.md5sum[9];
		config->md5sum[10]		= mStreamInfo.md5sum[10];
		config->md5sum[11]		= mStreamInfo.md5sum[11];
		config->md5sum[12]		= mStreamInfo.md5sum[12];
		config->md5sum[13]		= mStreamInfo.md5sum[13];
		config->md5sum[14]		= mStreamInfo.md5sum[14];
		config->md5sum[15]		= mStreamInfo.md5sum[15];
	}
	else
	{
		config->min_blocksize	= EndianU32_NtoB( kFLACDefaultFrameSize );
		config->max_blocksize	= EndianU32_NtoB( kFLACDefaultFrameSize );
		config->min_framesize	= EndianU32_NtoB( 0 );
		config->max_framesize	= EndianU32_NtoB( 0 );
		config->sample_rate		= EndianU32_NtoB( (UInt32)(mOutputFormat.mSampleRate) );
		config->channels		= EndianU32_NtoB(mOutputFormat.mChannelsPerFrame);
		config->bits_per_sample	= EndianU32_NtoB(mBitDepth);
		config->total_samples	= 0;
		config->md5sum[0]		= 0;
		config->md5sum[1]		= 0;
		config->md5sum[2]		= 0;
		config->md5sum[3]		= 0;
		config->md5sum[4]		= 0;
		config->md5sum[5]		= 0;
		config->md5sum[6]		= 0;
		config->md5sum[7]		= 0;
		config->md5sum[8]		= 0;
		config->md5sum[9]		= 0;
		config->md5sum[10]		= 0;
		config->md5sum[11]		= 0;
		config->md5sum[12]		= 0;
		config->md5sum[13]		= 0;
		config->md5sum[14]		= 0;
		config->md5sum[15]		= 0;
	}

	currPtr += sizeof(FLAC__StreamMetadata_StreamInfo);

	// if we're encoding more than two channels, add an AudioChannelLayout atom to describe the layout
	// Unfortunately there is no way to avoid dealing with an atom here
	if ( mOutputFormat.mChannelsPerFrame > 2 )
	{
		AudioChannelLayoutTag		tag;
		FullAtomHeader *			chan;
		AudioChannelLayout *		layout;
		
		chan = (FullAtomHeader *) currPtr;
		chan->size = EndianU32_NtoB( chanSize );
		chan->type = EndianU32_NtoB( AudioChannelLayoutAID );
		// version flags == 0
		currPtr += sizeof(FullAtomHeader);
		
		// we use a predefined set of layout tags so we don't need to write any channel descriptions
		layout = (AudioChannelLayout *) currPtr;
		tag = sChannelLayoutTags[mOutputFormat.mChannelsPerFrame - 1];
		layout->mChannelLayoutTag			= EndianU32_NtoB( tag );
		layout->mChannelBitmap				= 0;
		layout->mNumberChannelDescriptions	= 0;
		currPtr += offsetof(AudioChannelLayout, mChannelDescriptions);
	}
	
	// fill in Terminator atom header
	termAtom = (AudioTerminatorAtom *) currPtr;
	termAtom->size = EndianU32_NtoB( termSize );
	termAtom->atomType = EndianU32_NtoB( kAudioTerminatorAtomType );

	// all good, return the new description
	memcpy (outMagicCookieData, (const void *)(buffer), atomSize);
	ioMagicCookieDataByteSize = atomSize;
	status = noErr;
	
	// delete any memory we allocated
	if ( buffer != NULL )
	{
		delete buffer;
		buffer = NULL;
	}

}
// Exemple #28
void	ACBaseCodec::GetProperty(AudioCodecPropertyID inPropertyID, UInt32& ioPropertyDataSize, void* outPropertyData)
{
	// Base-class implementation of the AudioCodec property getter. Handles
	// the properties common to every codec; subclasses override and fall
	// through to this for anything they don't handle themselves. Unknown
	// properties throw kAudioCodecUnknownPropertyError; size mismatches
	// throw kAudioCodecBadPropertySizeError. For the list-valued properties
	// ioPropertyDataSize is updated to the number of bytes actually written.
	UInt32 thePacketsToGet;
	
	switch(inPropertyID)
	{
		case kAudioCodecPropertyNameCFString:
		{
			if (ioPropertyDataSize != SizeOf32(CFStringRef)) CODEC_THROW(kAudioCodecBadPropertySizeError);
			
			CABundleLocker lock;
			// Generic fallback name; caller releases the returned CFString.
			CFStringRef name = CFCopyLocalizedStringFromTableInBundle(CFSTR("unknown codec"), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
			*(CFStringRef*)outPropertyData = name;
			break; 
		}
		
		case kAudioCodecPropertyManufacturerCFString:
		{
			if (ioPropertyDataSize != SizeOf32(CFStringRef)) CODEC_THROW(kAudioCodecBadPropertySizeError);
			
			CABundleLocker lock;
			CFStringRef name = CFCopyLocalizedStringFromTableInBundle(CFSTR("Apple, Inc."), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
			*(CFStringRef*)outPropertyData = name;
			break; 
		}
        case kAudioCodecPropertyRequiresPacketDescription:
			// Base default: packet descriptions not required.
  			if(ioPropertyDataSize == SizeOf32(UInt32))
			{
                *reinterpret_cast<UInt32*>(outPropertyData) = 0; 
            }
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
            break;
			
		case kAudioCodecPropertyMinimumNumberInputPackets :
			if(ioPropertyDataSize != SizeOf32(UInt32)) CODEC_THROW(kAudioCodecBadPropertySizeError);
			*(UInt32*)outPropertyData = 1;
			break;
			
		case kAudioCodecPropertyMinimumNumberOutputPackets :
			if(ioPropertyDataSize != SizeOf32(UInt32)) CODEC_THROW(kAudioCodecBadPropertySizeError);
			*(UInt32*)outPropertyData = 1;
			break;
			
		case kAudioCodecPropertyCurrentInputFormat:
			if(ioPropertyDataSize == SizeOf32(AudioStreamBasicDescription))
			{
				GetCurrentInputFormat(*reinterpret_cast<AudioStreamBasicDescription*>(outPropertyData));
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertySupportedInputFormats:
		case kAudioCodecPropertyInputFormatsForOutputFormat:
			// Fills as many ASBDs as fit; reports back the bytes written.
			thePacketsToGet = ioPropertyDataSize / SizeOf32(AudioStreamBasicDescription);
			GetSupportedInputFormats(reinterpret_cast<AudioStreamBasicDescription*>(outPropertyData), thePacketsToGet);
			ioPropertyDataSize = thePacketsToGet * SizeOf32(AudioStreamBasicDescription);
			break;
			
		case kAudioCodecPropertyCurrentOutputFormat:
			if(ioPropertyDataSize == SizeOf32(AudioStreamBasicDescription))
			{
				GetCurrentOutputFormat(*reinterpret_cast<AudioStreamBasicDescription*>(outPropertyData));
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertySupportedOutputFormats:
		case kAudioCodecPropertyOutputFormatsForInputFormat:
			thePacketsToGet = ioPropertyDataSize / SizeOf32(AudioStreamBasicDescription);
			GetSupportedOutputFormats(reinterpret_cast<AudioStreamBasicDescription*>(outPropertyData), thePacketsToGet);
			ioPropertyDataSize = thePacketsToGet * SizeOf32(AudioStreamBasicDescription);
			break;
			
		case kAudioCodecPropertyMagicCookie:
			// Oversized buffers are allowed here (>=); GetMagicCookie trims
			// ioPropertyDataSize down to the bytes actually written.
			if(ioPropertyDataSize >= GetMagicCookieByteSize())
			{
				GetMagicCookie(outPropertyData, ioPropertyDataSize);
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyInputBufferSize:
			if(ioPropertyDataSize == SizeOf32(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = GetInputBufferByteSize();
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyUsedInputBufferSize:
			if(ioPropertyDataSize == SizeOf32(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = GetUsedInputBufferByteSize();
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
		case kAudioCodecPropertyIsInitialized:
			if(ioPropertyDataSize == SizeOf32(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = IsInitialized() ? 1 : 0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;
			
        case kAudioCodecPropertyAvailableNumberChannels:
			// Base default: mono and stereo only.
  			if(ioPropertyDataSize == SizeOf32(UInt32) * 2)
			{
				(reinterpret_cast<UInt32*>(outPropertyData))[0] = 1;
				(reinterpret_cast<UInt32*>(outPropertyData))[1] = 2;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

        case kAudioCodecPropertyPrimeMethod:
  			if(ioPropertyDataSize == SizeOf32(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = (UInt32)kAudioCodecPrimeMethod_None;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		case kAudioCodecPropertyPrimeInfo:
			// Base default: no priming frames at either end.
  			if(ioPropertyDataSize == SizeOf32(AudioCodecPrimeInfo) )
			{
				(reinterpret_cast<AudioCodecPrimeInfo*>(outPropertyData))->leadingFrames = 0;
				(reinterpret_cast<AudioCodecPrimeInfo*>(outPropertyData))->trailingFrames = 0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

        case kAudioCodecPropertyDoesSampleRateConversion:
  			if(ioPropertyDataSize == SizeOf32(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = 0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		default:
			CODEC_THROW(kAudioCodecUnknownPropertyError);
			break;
			
	};
}
// Exemple #29
void CASpeexDecoder::GetProperty(AudioCodecPropertyID inPropertyID, UInt32& ioPropertyDataSize, void* outPropertyData)
{
    // Answers Speex-decoder property queries; anything not handled here is
    // forwarded to ACBaseCodec::GetProperty. Size mismatches throw
    // kAudioCodecBadPropertySizeError.
    dbg_printf(" >> [%08lx] CASpeexDecoder :: GetProperty('%4.4s')\n", (UInt32) this, reinterpret_cast<char*> (&inPropertyID));
    switch(inPropertyID)
    {
    case kAudioCodecPropertyRequiresPacketDescription:
        // Speex packets are variable-sized, so descriptions are required.
        if(ioPropertyDataSize == sizeof(UInt32))
        {
            *reinterpret_cast<UInt32*>(outPropertyData) = 1;
        }
        else
        {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        break;
    case kAudioCodecPropertyHasVariablePacketByteSizes:
        if(ioPropertyDataSize == sizeof(UInt32))
        {
            *reinterpret_cast<UInt32*>(outPropertyData) = 1;
        }
        else
        {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        break;
    case kAudioCodecPropertyPacketFrameSize:
        if(ioPropertyDataSize == sizeof(UInt32))
        {
            UInt32 *outProp = reinterpret_cast<UInt32*>(outPropertyData);
            if (!mCompressionInitialized)
                *outProp = kSpeexFramesPerPacket;
            // BUG FIX: the original condition was
            //   mSpeexHeader.frame_size != 0 * mSpeexHeader.frames_per_packet != 0
            // which parses as ((frame_size != 0) != 0) and never looked at
            // frames_per_packet at all. Both fields must be non-zero for
            // their product to be a meaningful frame count.
            else if (mSpeexHeader.frame_size != 0 && mSpeexHeader.frames_per_packet != 0)
                *outProp = mSpeexHeader.frame_size * mSpeexHeader.frames_per_packet;
            else
                *outProp = 8192;
            // Ogg-framed Speex is reported as at least 8192 frames per packet.
            if (*outProp < 8192 && mInputFormat.mFormatID == kAudioFormatXiphOggFramedSpeex)
                *outProp = 8192;
            dbg_printf("  = [%08lx] CASpeexDecoder :: GetProperty('pakf'): %ld\n",
                       (UInt32) this, *outProp);
        }
        else
        {
            CODEC_THROW(kAudioCodecBadPropertySizeError);
        }
        break;

        //case kAudioCodecPropertyQualitySetting: ???
#if TARGET_OS_MAC
    case kAudioCodecPropertyNameCFString:
        {
            if (ioPropertyDataSize != sizeof(CFStringRef)) CODEC_THROW(kAudioCodecBadPropertySizeError);

            CABundleLocker lock;
            // Localized display name; caller releases the returned CFString.
            CFStringRef name = CFCopyLocalizedStringFromTableInBundle(CFSTR("Xiph Speex decoder"), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
            *(CFStringRef*)outPropertyData = name;
            break;
        }

        //case kAudioCodecPropertyManufacturerCFString:
#endif
    default:
        ACBaseCodec::GetProperty(inPropertyID, ioPropertyDataSize, outPropertyData);
    }
    dbg_printf("<.. [%08lx] CASpeexDecoder :: GetProperty('%4.4s')\n", (UInt32) this, reinterpret_cast<char*> (&inPropertyID));
}
void	ACFLACEncoder::GetProperty(AudioCodecPropertyID inPropertyID, UInt32& ioPropertyDataSize, void* outPropertyData)
{
	switch(inPropertyID)
	{
		case kAudioCodecPropertyNameCFString:
		{
			if (ioPropertyDataSize != sizeof(CFStringRef))
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			CABundleLocker lock;
			CFStringRef name = CFCopyLocalizedStringFromTableInBundle(CFSTR("FLAC encoder"), CFSTR("CodecNames"), GetCodecBundle(), CFSTR(""));
			*(CFStringRef*)outPropertyData = name;
			break; 
		}
		
		case kAudioCodecPropertyAvailableNumberChannels:
  			if(ioPropertyDataSize == sizeof(UInt32) * kFLACNumberSupportedChannelTotals)
			{
				memcpy(reinterpret_cast<UInt32*>(outPropertyData), mSupportedChannelTotals, ioPropertyDataSize);
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		case kAudioCodecPropertyAvailableInputSampleRates:
  			if(ioPropertyDataSize == sizeof(AudioValueRange) )
			{
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMinimum = 0.0;
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMaximum = 0.0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		case kAudioCodecPropertyAvailableOutputSampleRates:
  			if(ioPropertyDataSize == sizeof(AudioValueRange) )
			{
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMinimum = 0.0;
				(reinterpret_cast<AudioValueRange*>(outPropertyData))->mMaximum = 0.0;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

        case kAudioCodecPropertyPrimeMethod:
  			if(ioPropertyDataSize == sizeof(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = (UInt32)kAudioCodecPrimeMethod_None;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		case kAudioCodecPropertyPrimeInfo:
  			if(ioPropertyDataSize == sizeof(AudioCodecPrimeInfo) )
			{
				(reinterpret_cast<AudioCodecPrimeInfo*>(outPropertyData))->leadingFrames = 0;
				(reinterpret_cast<AudioCodecPrimeInfo*>(outPropertyData))->trailingFrames = mTrailingFrames;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

        case kAudioCodecPropertyQualitySetting:
  			if(ioPropertyDataSize == sizeof(UInt32))
			{
                *reinterpret_cast<UInt32*>(outPropertyData) = mQuality;
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

       case kAudioCodecPropertyMaximumPacketByteSize:
			if(ioPropertyDataSize == sizeof(UInt32))
			{
				if (mMaxFrameBytes)
				{
					*reinterpret_cast<UInt32*>(outPropertyData) = mMaxFrameBytes;
				}
				else // default case
				{
					*reinterpret_cast<UInt32*>(outPropertyData) = mMaxFrameBytes = kInputBufferPackets * mOutputFormat.mChannelsPerFrame * (mBitDepth >> 3) + kMaxEscapeHeaderBytes;
				}
			#if VERBOSE
				printf("Max packet size == %lu, mBitDepth == %lu\n", mMaxFrameBytes, mBitDepth);
			#endif
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		case kAudioCodecInputFormatsForOutputFormat:
			if(ioPropertyDataSize >= sizeof(AudioStreamBasicDescription))
			{
				UInt32 bitDepth, numFormats = 1, tempSize;
				switch ( ( ( (AudioStreamBasicDescription*)(outPropertyData) )[0].mFormatFlags) & 0x00000007)
				{
					case kFLACFormatFlag_16BitSourceData:
						bitDepth = 16;
						break;
					case kFLACFormatFlag_20BitSourceData:
						bitDepth = 24;
						break;
					case kFLACFormatFlag_24BitSourceData:
						bitDepth = 24;
						break;						
					case kFLACFormatFlag_32BitSourceData:
						bitDepth = 32;
						break;
					default: // Check the currently set input format bit depth
						bitDepth = mInputFormat.mBitsPerChannel;
						numFormats = 2;
						break;
				}
				AudioStreamBasicDescription theInputFormat = {kAudioStreamAnyRate, kAudioFormatLinearPCM, kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked, 0, 1, 0, 0, bitDepth, 0};
				tempSize = sizeof(AudioStreamBasicDescription) * numFormats;
				if ( tempSize <= ioPropertyDataSize )
				{
					ioPropertyDataSize = tempSize;
				}
				else
				{
					CODEC_THROW(kAudioCodecBadPropertySizeError);
				}
				if ( numFormats == 1 )
				{
					memcpy(outPropertyData, &theInputFormat, ioPropertyDataSize);
				}
				else // numFormats == 2
				{
					theInputFormat.mBitsPerChannel = 16;
					memcpy(outPropertyData, &theInputFormat, sizeof(AudioStreamBasicDescription));
					theInputFormat.mBitsPerChannel = 24;
					memcpy((void *)((Byte *)outPropertyData + sizeof(AudioStreamBasicDescription)), &theInputFormat, sizeof(AudioStreamBasicDescription));
				}
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

        case kAudioCodecPropertyZeroFramesPadded:
			if(ioPropertyDataSize == sizeof(UInt32))
			{
				*reinterpret_cast<UInt32*>(outPropertyData) = 0; // we never append any extra zeros
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		case kAudioCodecPropertySettings:
  			if(ioPropertyDataSize == sizeof(CFDictionaryRef *) )
			{
				BuildSettingsDictionary(reinterpret_cast<CFDictionaryRef *>(outPropertyData) );
			}
			else
			{
				CODEC_THROW(kAudioCodecBadPropertySizeError);
			}
			break;

		default:
			ACFLACCodec::GetProperty(inPropertyID, ioPropertyDataSize, outPropertyData);
	}