Ejemplo n.º 1
0
LoaderSourceFile::LoaderSourceFile( SourceFile *source, Target *target )
	: mSource( source ), mPacketOffset( 0 )
{
	// Describe the source stream exactly as the file reports its native format.
	AudioStreamBasicDescription sourceDescription;
	
	sourceDescription.mFormatID = source->mNativeFormatId; //kAudioFormatLinearPCM;
	sourceDescription.mFormatFlags = source->mNativeFormatFlags;
	sourceDescription.mSampleRate = source->getSampleRate();
	sourceDescription.mBytesPerPacket = source->mBytesPerPacket;
	sourceDescription.mFramesPerPacket = source->mFramesPerPacket;
	sourceDescription.mBytesPerFrame = source->mBytesPerFrame;
	sourceDescription.mChannelsPerFrame = source->getChannelCount();
	sourceDescription.mBitsPerChannel = source->getBitsPerSample();
	
	AudioStreamBasicDescription targetDescription;
	
	// This loader only knows how to produce linear PCM output.
	if( ! target->isPcm() ) {
		throw IoExceptionUnsupportedDataFormat();
	}
	
	//right now this always converts to linear PCM --that's probably ok
	targetDescription.mFormatID = kAudioFormatLinearPCM; //target->mNativeFormatId;
	targetDescription.mFormatFlags = CalculateLPCMFlags( target->getBitsPerSample(), target->getBlockAlign() * 8, target->isFloat(), target->isBigEndian(), ( ! target->isInterleaved() ) ); //target->mNativeFormatFlags
	targetDescription.mSampleRate = target->getSampleRate();
	// BUGFIX: these packet/frame sizes were previously derived from the
	// *source*'s bit depth, channel count, and block align. If the conversion
	// changes either channel count or sample width, the converter would be
	// handed inconsistent sizes for its output format. Derive them from the
	// target instead; for LPCM there is exactly one frame per packet.
	targetDescription.mBytesPerPacket = ( target->getBitsPerSample() * target->getChannelCount() ) / 8;
	targetDescription.mFramesPerPacket = 1;
	targetDescription.mBytesPerFrame = target->getBlockAlign();
	targetDescription.mChannelsPerFrame = target->getChannelCount();
	targetDescription.mBitsPerChannel = target->getBitsPerSample();
	
	// The converter pulls packets from us via dataInputCallback and resamples
	// / reformats them into the target description.
	mConverter = shared_ptr<CocoaCaConverter>( new CocoaCaConverter( this, &LoaderSourceFile::dataInputCallback, sourceDescription, targetDescription, mSource->mMaxPacketSize ) );
}
Ejemplo n.º 2
0
void SoundOutput_MacOSX::mixer_thread_starting()
{
    audio_format.mSampleRate = frequency;
    audio_format.mFormatID = kAudioFormatLinearPCM;
    audio_format.mFormatFlags = CalculateLPCMFlags(8*sizeof(short),8*sizeof(short),false,false,false);
    audio_format.mBytesPerPacket = 2 * sizeof(short);
    audio_format.mFramesPerPacket = 1;
    audio_format.mBytesPerFrame = 2 * sizeof(short);
    audio_format.mChannelsPerFrame = 2;
    audio_format.mBitsPerChannel = 8 * sizeof (short);
    audio_format.mReserved = 0;

    OSStatus result = AudioQueueNewOutput(&audio_format, &SoundOutput_MacOSX::static_audio_queue_callback, this, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode, 0, &audio_queue);
    if (result != 0)
        throw Exception("AudioQueueNewOutput failed");

    for (int i = 0; i < fragment_buffer_count; i++)
    {
        result = AudioQueueAllocateBuffer(audio_queue, fragment_size * sizeof(short) * 2, &audio_buffers[i]);
        if (result != 0)
            throw Exception("AudioQueueAllocateBuffer failed");
        audio_queue_callback(audio_queue, audio_buffers[i]);
    }
    result = AudioQueuePrime(audio_queue,0,NULL);
    if (result != 0)
        throw Exception("AudioQueuePrime failed");
    result = AudioQueueStart(audio_queue, 0);
    if (result != 0)
        throw Exception("AudioQueueStart failed");
}