//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//	AUPulseDetector::AUPulseDetector
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AUPulseDetector::AUPulseDetector(AudioUnit component)
    : AUEffectBase(component), mChildObject(NULL)
{
    CreateElements();

    // mono, canonical AU format (non-interleaved) at 44.1 kHz on both elements
    CAStreamBasicDescription monoDesc;
    monoDesc.SetAUCanonical(1, false);
    monoDesc.mSampleRate = 44100.;

    GetOutput(0)->SetStreamFormat(monoDesc);
    GetInput(0)->SetStreamFormat(monoDesc);

    // register the five indexed parameters and their defaults
    Globals()->UseIndexedParameters(5);
    Globals()->SetParameter(kPulseThreshold,   kPulseThresholdDefault);
    Globals()->SetParameter(kPulseLength,      kPulseLengthDefault);
    Globals()->SetParameter(kPulseRestTime,    kPulseRestTimeDefault);
    Globals()->SetParameter(kDoPulseDetection, kDoPulseDetectionDefault);
    Globals()->SetParameter(kWritePulseStats,  0);

    mPulseTimeStats = new PulseTS[kPulseTSSize];

#if AU_DEBUG_DISPATCHER
    mDebugDispatcher = new AUDebugDispatcher(this);
#endif
}
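Parameters registered through UseIndexedParameters are read back at render time from the same global element. A minimal kernel sketch of that pattern follows; AUPulseDetectorKernel is a hypothetical name for illustration, not necessarily the sample's actual render path:

// Hypothetical kernel sketch: reads the indexed parameters registered above.
// AUKernelBase::GetParameter pulls the current value from the global element.
void AUPulseDetectorKernel::Process(const Float32 *inSourceP,
                                    Float32       *inDestP,
                                    UInt32         inFramesToProcess,
                                    UInt32         inNumChannels,
                                    bool          &ioSilence)
{
    Float32 threshold = GetParameter(kPulseThreshold);
    bool    detect    = GetParameter(kDoPulseDetection) > 0.;

    for (UInt32 i = 0; i < inFramesToProcess; ++i) {
        inDestP[i] = inSourceP[i];                      // pass audio through untouched
        if (detect && fabsf(inSourceP[i]) > threshold) {
            // a sample crossed the threshold - record a pulse candidate here
        }
    }
}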
Minim::AudioFormat::AudioFormat(float sampleRate, int numberOfChannels)
{
    CAStreamBasicDescription streamDesc;
    streamDesc.mSampleRate = sampleRate;
    streamDesc.SetAUCanonical(numberOfChannels, true);   // interleaved canonical AU format

    mChannels         = streamDesc.mChannelsPerFrame;
    mSampleRate       = streamDesc.mSampleRate;
    mFrameRate        = streamDesc.mSampleRate;
    mFrameSize        = streamDesc.mBytesPerFrame;
    mSampleSizeInBits = streamDesc.mBitsPerChannel;
    mBigEndian        = (streamDesc.mFormatFlags & kLinearPCMFormatFlagIsBigEndian) != 0;
}
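SetAUCanonical itself just fills in the ASBD fields for the canonical AudioUnit sample type (Float32 on the desktop; 8.24 fixed point on older iOS SDKs where CA_PREFER_FIXED_POINT is set). A quick standalone way to inspect what it produces, assuming the Core Audio PublicUtility headers are on the include path:

#include "CAStreamBasicDescription.h"   // Core Audio PublicUtility

int main()
{
    CAStreamBasicDescription desc;
    desc.SetAUCanonical(2, false);      // 2 channels, non-interleaved
    desc.mSampleRate = 44100.;          // SetAUCanonical does not touch the sample rate
    desc.Print();                       // prints the resulting ASBD fields
    return 0;
}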
int SetupRemoteIO(AudioUnit& inRemoteIOUnit, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat)
{
    try {
        // Open the output unit
        AudioComponentDescription desc;
        desc.componentType = kAudioUnitType_Output;
        desc.componentSubType = kAudioUnitSubType_RemoteIO;
        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
        desc.componentFlags = 0;
        desc.componentFlagsMask = 0;

        AudioComponent comp = AudioComponentFindNext(NULL, &desc);
        XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit");

        UInt32 one = 1;
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                                           kAudioUnitScope_Input, 1, &one, sizeof(one)),
                      "couldn't enable input on the remote I/O unit");

        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_SetRenderCallback,
                                           kAudioUnitScope_Input, 0, &inRenderProc, sizeof(inRenderProc)),
                      "couldn't set remote i/o render callback");

        // NEWL: set the canonical format of the I/O system rather than the AudioUnits' own:
        // LPCM, non-interleaved, 16-bit signed integer samples.
        // outFormat.SetCanonical(2, false);
        // OLDL: set our required format - canonical AU format: LPCM non-interleaved 8.24 fixed point
        outFormat.SetAUCanonical(2, false);
        outFormat.mSampleRate = SAMPRATE;
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat,
                                           kAudioUnitScope_Input, 0, &outFormat, sizeof(outFormat)),
                      "couldn't set the remote I/O unit's output client format");
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat,
                                           kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)),
                      "couldn't set the remote I/O unit's input client format");

        XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        return 1;
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
        return 1;
    }
    return 0;
}
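The inRenderProc passed in above must point at a function with the standard AURenderCallback signature. A minimal pass-through sketch follows; the rioUnit global holding the configured unit is an assumption of this sketch, not part of the code above:

// Sketch of a render callback compatible with SetupRemoteIO above.
// Assumes rioUnit is the AudioUnit instance that SetupRemoteIO configured.
static AudioUnit rioUnit;

static OSStatus PerformRender(void                       *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp       *inTimeStamp,
                              UInt32                      inBusNumber,
                              UInt32                      inNumberFrames,
                              AudioBufferList            *ioData)
{
    // Pull the freshly captured samples from the input element (bus 1);
    // leaving ioData untouched afterwards plays the input straight back out.
    return AudioUnitRender(rioUnit, ioActionFlags, inTimeStamp,
                           1 /*input bus*/, inNumberFrames, ioData);
}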
void DoAQOfflineRender(CFURLRef sourceURL, CFURLRef destinationURL)
{
    // main audio queue code
    try {
        AQTestInfo myInfo;
        myInfo.mDone = false;
        myInfo.mFlushed = false;
        myInfo.mCurrentPacket = 0;

        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile), "AudioFileOpen failed");

        UInt32 size = sizeof(myInfo.mDataFormat);
        XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat), "couldn't get file's data format");

        printf("File format: ");
        myInfo.mDataFormat.Print();

        // create a new audio queue output
        XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat,   // The data format of the audio to play. For linear PCM, only interleaved formats are supported.
                                          AQTestBufferCallback,  // A callback function to use with the playback audio queue.
                                          &myInfo,               // A custom data structure for use with the callback function.
                                          CFRunLoopGetCurrent(), // The event loop on which the callback function is to be called.
                                                                 // If you specify NULL, the callback is invoked on one of the audio queue's internal threads.
                                          kCFRunLoopCommonModes, // The run loop mode in which to invoke the callback function.
                                          0,                     // Reserved for future use. Must be 0.
                                          &myInfo.mQueue),       // On output, the newly created playback audio queue object.
                      "AudioQueueNew failed");

        UInt32 bufferByteSize;

        // we need to calculate how many packets we read at a time and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);

            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");

            // adjust buffer size to represent about a second of audio based on this format
            CalculateBytesForTime(myInfo.mDataFormat, maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);

            if (isFormatVBR) {
                myInfo.mPacketDescs = new AudioStreamPacketDescription[myInfo.mNumPacketsToRead];
            } else {
                myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }

            printf("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
        }

        // if the file has a magic cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        OSStatus result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char[size];
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete[] cookie;
        }

        // channel layout?
        OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, NULL);
        AudioChannelLayout *acl = NULL;
        if (err == noErr && size > 0) {
            acl = (AudioChannelLayout *)malloc(size);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
            XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
        }

        // allocate the input read buffer
        XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffer), "AudioQueueAllocateBuffer");

        // prepare a canonical interleaved capture format
        CAStreamBasicDescription captureFormat;
        captureFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
        captureFormat.SetAUCanonical(myInfo.mDataFormat.mChannelsPerFrame, true); // interleaved
        XThrowIfError(AudioQueueSetOfflineRenderFormat(myInfo.mQueue, &captureFormat, acl), "set offline render format");

        ExtAudioFileRef captureFile;

        // prepare a 16-bit int file format, same channel count and sample rate
        CAStreamBasicDescription dstFormat;
        dstFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
        dstFormat.mChannelsPerFrame = myInfo.mDataFormat.mChannelsPerFrame;
        dstFormat.mFormatID = kAudioFormatLinearPCM;
        dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        dstFormat.mBitsPerChannel = 16;
        dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
        dstFormat.mFramesPerPacket = 1;

        // create the capture file
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");

        // set the capture file's client format to be the canonical format from the queue
        XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");

        // allocate the capture buffer, just keep it at half the size of the enqueue buffer
        // we don't ever want to pull any faster than we can push data in for render
        // this 2:1 ratio keeps the AQ Offline Render happy
        const UInt32 captureBufferByteSize = bufferByteSize / 2;

        AudioQueueBufferRef captureBuffer;
        AudioBufferList captureABL;

        XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, captureBufferByteSize, &captureBuffer), "AudioQueueAllocateBuffer");

        captureABL.mNumberBuffers = 1;
        captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
        captureABL.mBuffers[0].mNumberChannels = captureFormat.mChannelsPerFrame;

        // let's start playing now - stop is called in the AQTestBufferCallback when there's
        // no more to read from the file
        XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

        AudioTimeStamp ts;
        ts.mFlags = kAudioTimeStampSampleTimeValid;
        ts.mSampleTime = 0;

        // we need to call this once asking for 0 frames
        XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, 0), "AudioQueueOfflineRender");

        // we need to enqueue a buffer after the queue has started
        AQTestBufferCallback(&myInfo, myInfo.mQueue, myInfo.mBuffer);

        while (true) {
            UInt32 reqFrames = captureBufferByteSize / captureFormat.mBytesPerFrame;
            XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, reqFrames), "AudioQueueOfflineRender");

            captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
            captureABL.mBuffers[0].mDataByteSize = captureBuffer->mAudioDataByteSize;
            UInt32 writeFrames = captureABL.mBuffers[0].mDataByteSize / captureFormat.mBytesPerFrame;

            printf("t = %.f: AudioQueueOfflineRender: req %d fr/%d bytes, got %d fr/%d bytes\n",
                   ts.mSampleTime, (int)reqFrames, (int)captureBufferByteSize, (int)writeFrames, (int)captureABL.mBuffers[0].mDataByteSize);

            XThrowIfError(ExtAudioFileWrite(captureFile, writeFrames, &captureABL), "ExtAudioFileWrite");

            if (myInfo.mFlushed) break;

            ts.mSampleTime += writeFrames;
        }

        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

        XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
        XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioFileClose failed");
        XThrowIfError(ExtAudioFileDispose(captureFile), "ExtAudioFileDispose failed");

        if (myInfo.mPacketDescs) delete[] myInfo.mPacketDescs;
        if (acl) free(acl);
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    return;
}
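AQTestBufferCallback, referenced but not shown above, is the half of the sample that keeps the queue fed: it reads the next run of packets from the source file, re-enqueues the buffer, and stops the queue at end of file. A sketch consistent with the AQTestInfo fields used above (the exact body in the original sample may differ):

void AQTestBufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer)
{
    AQTestInfo *myInfo = (AQTestInfo *)inUserData;
    if (myInfo->mDone) return;

    UInt32 numBytes;
    UInt32 nPackets = myInfo->mNumPacketsToRead;

    // read the next chunk of packets from the source file into the buffer
    XThrowIfError(AudioFileReadPackets(myInfo->mAudioFile, false, &numBytes,
                                       myInfo->mPacketDescs, myInfo->mCurrentPacket,
                                       &nPackets, inCompleteAQBuffer->mAudioData),
                  "AudioFileReadPackets failed");

    if (nPackets > 0) {
        // hand the filled buffer back to the queue
        inCompleteAQBuffer->mAudioDataByteSize = numBytes;
        XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inCompleteAQBuffer,
                                              myInfo->mPacketDescs ? nPackets : 0,
                                              myInfo->mPacketDescs),
                      "AudioQueueEnqueueBuffer failed");
        myInfo->mCurrentPacket += nPackets;
    } else {
        // end of file: stop the queue and signal the offline render loop above
        XThrowIfError(AudioQueueStop(inAQ, false), "AudioQueueStop failed");
        myInfo->mFlushed = true;
        myInfo->mDone = true;
    }
}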