// Convert the audio file at kInputFileLocation to 16-bit big-endian stereo
// LPCM and write it to "output.aif" in the current directory.
// CheckResult() is a project helper that reports and aborts on OSStatus errors.
int	main(int argc, const char *argv[])
{
 	MyAudioConverterSettings audioConverterSettings = {0};
	
	// open the input audio file
	CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, kInputFileLocation, kCFURLPOSIXPathStyle, false);
    CheckResult (AudioFileOpenURL(inputFileURL, kAudioFileReadPermission , 0, &audioConverterSettings.inputFile),
				 "AudioFileOpenURL failed");
	CFRelease(inputFileURL);
	
	// get the audio data format from the file
	UInt32 propSize = sizeof(audioConverterSettings.inputFormat);
    CheckResult (AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyDataFormat, &propSize, &audioConverterSettings.inputFormat),
				 "couldn't get file's data format");
	
	// get the total number of packets in the file
	propSize = sizeof(audioConverterSettings.inputFilePacketCount);
    CheckResult (AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyAudioDataPacketCount, &propSize, &audioConverterSettings.inputFilePacketCount),
				 "couldn't get file's packet count");
	
	// get size of the largest possible packet
	propSize = sizeof(audioConverterSettings.inputFilePacketMaxSize);
    CheckResult(AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyMaximumPacketSize, &propSize, &audioConverterSettings.inputFilePacketMaxSize),
				"couldn't get file's max packet size");
	
	// define the output format. AudioConverter requires that one of the data formats be LPCM
    audioConverterSettings.outputFormat.mSampleRate = 44100.0;
	audioConverterSettings.outputFormat.mFormatID = kAudioFormatLinearPCM;
    audioConverterSettings.outputFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioConverterSettings.outputFormat.mBytesPerPacket = 4;	// 2 channels * 2 bytes/sample
	audioConverterSettings.outputFormat.mFramesPerPacket = 1;
	audioConverterSettings.outputFormat.mBytesPerFrame = 4;
	audioConverterSettings.outputFormat.mChannelsPerFrame = 2;
	audioConverterSettings.outputFormat.mBitsPerChannel = 16;
	
	// create output file; kAudioFileFlags_EraseFile overwrites any existing file
	CFURLRef outputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("output.aif"), kCFURLPOSIXPathStyle, false);
	CheckResult (AudioFileCreateWithURL(outputFileURL, kAudioFileAIFFType, &audioConverterSettings.outputFormat, kAudioFileFlags_EraseFile, &audioConverterSettings.outputFile),
				 "AudioFileCreateWithURL failed");
    CFRelease(outputFileURL);
	
	fprintf(stdout, "Converting...\n");
	Convert(&audioConverterSettings);
	
cleanup:
	AudioFileClose(audioConverterSettings.inputFile);
	AudioFileClose(audioConverterSettings.outputFile);
	printf("Done\n");	// was "\r": a bare carriage return leaves the message overwritten/invisible
	return 0;
}
bool DiskOut::initWithAudioFileAiff32(const char *audioFilePath, 
									  UGen const& input, 
									  bool overwriteExisitingFile) throw()
{
	Text path; // this needs to be here so it doesn't get garbage collected too early
	
	if(audioFilePath[0] != '/')
	{	
		path = NSUtilities::pathInDirectory(NSUtilities::Documents, audioFilePath);
		audioFilePath = path.getArray();
	}
	
	CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(NULL, 
															   (UInt8*)audioFilePath, 
															   strlen(audioFilePath), 
															   false);
	
	AudioStreamBasicDescription format;
	
	const int numChannels = input.getNumChannels();
	
	format.mChannelsPerFrame	= numChannels;
	format.mSampleRate			= UGen::getSampleRate();
	format.mFormatID			= kAudioFormatLinearPCM;
	format.mFormatFlags			= kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	format.mBitsPerChannel		= 32;
	format.mBytesPerFrame		= format.mChannelsPerFrame * format.mBitsPerChannel / 8;
	format.mFramesPerPacket		= 1;
	format.mBytesPerPacket		= format.mBytesPerFrame * format.mFramesPerPacket;
	
	AudioFileID	audioFile;
	OSStatus status = AudioFileCreateWithURL(fileURL,
											 kAudioFileAIFFType,
											 &format,
											 overwriteExisitingFile ? kAudioFileFlags_EraseFile : 0,
											 &audioFile);
	
	if(status != noErr) return false;
	
	initInternal(numChannels);
	generateFromProxyOwner(new DiskOutUGenInternalAiff32(audioFile, format, input));
	
	return true;	
}
// Convert the audio file at kInputFileLocation to 16-bit big-endian stereo
// LPCM AIFF ("output.aif") using ExtAudioFile: the PCM description is set as
// the input file's client format so ExtAudioFileRead performs the decode.
int	main(int argc, const char *argv[])
{
 	MyAudioConverterSettings audioConverterSettings = {0};
	
	// open the input with ExtAudioFile
	CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, kInputFileLocation, kCFURLPOSIXPathStyle, false);
	CheckResult(ExtAudioFileOpenURL(inputFileURL, 
									&audioConverterSettings.inputFile),
				"ExtAudioFileOpenURL failed");
	CFRelease(inputFileURL);	// created above (CF "Create" rule); previously leaked
	
	// define the output format. AudioConverter requires that one of the data formats be LPCM
    audioConverterSettings.outputFormat.mSampleRate = 44100.0;
	audioConverterSettings.outputFormat.mFormatID = kAudioFormatLinearPCM;
    audioConverterSettings.outputFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioConverterSettings.outputFormat.mBytesPerPacket = 4;	// 2 channels * 2 bytes/sample
	audioConverterSettings.outputFormat.mFramesPerPacket = 1;
	audioConverterSettings.outputFormat.mBytesPerFrame = 4;
	audioConverterSettings.outputFormat.mChannelsPerFrame = 2;
	audioConverterSettings.outputFormat.mBitsPerChannel = 16;
	
	// create output file (kAudioFileFlags_EraseFile overwrites an existing file)
	CFURLRef outputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("output.aif"), kCFURLPOSIXPathStyle, false);
	CheckResult (AudioFileCreateWithURL(outputFileURL, kAudioFileAIFFType, &audioConverterSettings.outputFormat, kAudioFileFlags_EraseFile, &audioConverterSettings.outputFile),
				 "AudioFileCreateWithURL failed");
    CFRelease(outputFileURL);
	
	// set the PCM format as the client format on the input ext audio file
	CheckResult(ExtAudioFileSetProperty(audioConverterSettings.inputFile,
										kExtAudioFileProperty_ClientDataFormat,
										sizeof (AudioStreamBasicDescription),
										&audioConverterSettings.outputFormat),
				"Couldn't set client data format on input ext file");
	
	fprintf(stdout, "Converting...\n");
	Convert(&audioConverterSettings);
	
cleanup:
	// the input is an ExtAudioFileRef, so dispose rather than AudioFileClose
	ExtAudioFileDispose(audioConverterSettings.inputFile);
	AudioFileClose(audioConverterSettings.outputFile);
	return 0;
}
// Convert the audio file at sourceURL into a CAF file at destinationURL using
// an AudioConverter. outputFormat selects the destination codec: for LPCM a
// 16-bit interleaved description is built here; for compressed formats the
// AudioFormat API fills out the ASBD. outputSampleRate == 0 keeps the source
// rate; outputBitRate == 0 defaults to 192 kbps (AAC only). Handles audio
// session interruptions when the codec cannot resume. Returns noErr on
// success or the first OSStatus error encountered.
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                // OSStatus is SInt32; cast so the format specifier is correct on both 32- and 64-bit builds
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %d, paramErr is OK if PCM\n", (int)error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occured but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                // UInt32/SInt64 arguments are cast so the format specifiers are portable
                printf("Convert Output: Write %u packets at position %lld, size: %u\n", (unsigned int)ioOutputDataPackets, (long long)outputFilePos, (unsigned int)inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jive with
                printf("Total number of output frames counted: %llu\n", (unsigned long long)totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
/* Set up the recorder: build a PCM stream description from sFormat, pick the
   file type from outputFileName's extension (.aiff big-endian, .wav
   little-endian), create the input AudioQueue, create the target file, and
   allocate/enqueue two ~0.25 s capture buffers. Throws via Misc::throwStdErr
   on any failure, disposing the queue/file first. The audioSource parameter
   is not used in this code path. */
void SoundRecorder::init(const char* audioSource,const SoundDataFormat& sFormat,const char* outputFileName)
	{
	/* Store and sanify the sound data format: */
	format.mSampleRate=double(sFormat.framesPerSecond);
	format.mFormatID=kAudioFormatLinearPCM;
	format.mFormatFlags=0x0;
	format.mBitsPerChannel=sFormat.bitsPerSample>8?(sFormat.bitsPerSample+7)&~0x7:8; /* round up to whole bytes */
	format.mChannelsPerFrame=sFormat.samplesPerFrame>=1?sFormat.samplesPerFrame:1;
	format.mBytesPerFrame=format.mChannelsPerFrame*(format.mBitsPerChannel/8);
	format.mFramesPerPacket=1;
	format.mBytesPerPacket=format.mFramesPerPacket*format.mBytesPerFrame;
	
	/* Determine the output file format from the file name extension: */
	AudioFileTypeID audioFileType=kAudioFileWAVEType; // Not really a default; just to make compiler happy
	const char* ext=Misc::getExtension(outputFileName);
	if(*ext=='\0'||strcasecmp(ext,".aiff")==0)
		{
		/* Adjust the sound data format for AIFF files: */
		audioFileType=kAudioFileAIFFType;
		format.mFormatFlags=kLinearPCMFormatFlagIsBigEndian|kLinearPCMFormatFlagIsSignedInteger|kLinearPCMFormatFlagIsPacked;
		}
	else if(strcasecmp(ext,".wav")==0)
		{
		/* Adjust the sound data format for WAV files: */
		audioFileType=kAudioFileWAVEType;
		format.mFormatFlags=kLinearPCMFormatFlagIsPacked;
		if(format.mBitsPerChannel>8)
			format.mFormatFlags|=kLinearPCMFormatFlagIsSignedInteger; /* 8-bit WAV is unsigned by convention */
		}
	else
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Output file name %s has unrecognized extension",outputFileName);
	
	/* Create the recording audio queue: */
	if(AudioQueueNewInput(&format,handleInputBufferWrapper,this,0,kCFRunLoopCommonModes,0,&queue)!=noErr)
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while creating audio queue");
	
	/* Retrieve the fully specified audio data format from the audio queue: */
	UInt32 formatSize=sizeof(format);
	if(AudioQueueGetProperty(queue,kAudioConverterCurrentOutputStreamDescription,&format,&formatSize)!=noErr)
		{
		AudioQueueDispose(queue,true);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while retrieving audio queue sound format");
		}
	
	/* Open the target audio file: */
	CFURLRef audioFileURL=CFURLCreateFromFileSystemRepresentation(0,reinterpret_cast<const UInt8*>(outputFileName),strlen(outputFileName),false);
	if(AudioFileCreateWithURL(audioFileURL,audioFileType,&format,kAudioFileFlags_EraseFile,&audioFile)!=noErr)
		{
		AudioQueueDispose(queue,true);
		CFRelease(audioFileURL);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while opening output file %s",outputFileName);
		}
	CFRelease(audioFileURL);
	
	/* Calculate an appropriate buffer size and allocate the sound buffers: */
	int maxPacketSize=format.mBytesPerPacket;
	if(maxPacketSize==0) // Must be a variable bit rate sound format
		{
		/* Query the expected maximum packet size from the audio queue: */
		UInt32 maxVBRPacketSize=sizeof(maxPacketSize);
		if(AudioQueueGetProperty(queue,kAudioConverterPropertyMaximumOutputPacketSize,&maxPacketSize,&maxVBRPacketSize)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while calculating sample buffer size");
			}
		}
	
	/* Calculate an appropriate buffer size based on the given duration (0.25 s per buffer): */
	int numPackets=int(floor(double(format.mSampleRate)*0.25+0.5));
	bufferSize=UInt32(numPackets*maxPacketSize);
	
	/* Create the sample buffers: */
	for(int i=0;i<2;++i)
		{
		/* Create the sound buffer: */
		if(AudioQueueAllocateBuffer(queue,bufferSize,&buffers[i])!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while allocating sample buffer %d",i);
			}
		
		/* Add the buffer to the queue: */
		if(AudioQueueEnqueueBuffer(queue,buffers[i],0,0)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while enqueuing sample buffer %d",i);
			}
		}
	}
// ____________________________________________________________________________________
// main program
//
// Command-line audio recorder. Usage:
//   [-c channels] [-d format4cc] [-r samplerate] [-s seconds] [-v] <file>
// Records from the default input device into <file>; the container type is
// inferred from the file name (CAF by default). With -s the recording runs for
// a fixed duration timed off the queue's IsRunning property; otherwise it runs
// until the user presses return.
int	main(int argc, const char *argv[])
{
	const char *recordFileName = NULL;
	int i, nchannels, bufferByteSize;
	float seconds = 0;
	AudioStreamBasicDescription recordFormat;
	MyRecorder aqr;
	UInt32 size;
	CFURLRef url;
    OSStatus err = noErr;
	
	// fill structures with 0/NULL
	memset(&recordFormat, 0, sizeof(recordFormat));
	memset(&aqr, 0, sizeof(aqr));
	
	// parse arguments
	for (i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		
		if (arg[0] == '-') {
			switch (arg[1]) {
			case 'c':	// -c <channels>
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%d", &nchannels) != 1)
					usage();
				recordFormat.mChannelsPerFrame = nchannels;
				break;
			case 'd':	// -d <format>: four-char code, e.g. 'aac '
				if (++i == argc) MissingArgument(arg);
				if (StrTo4CharCode(argv[i], &recordFormat.mFormatID) == 0)
					ParseError(arg, argv[i]);
				break;
			case 'r':	// -r <sample rate in Hz>
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%lf", &recordFormat.mSampleRate) != 1)
					ParseError(arg, argv[i]);
				break;
			case 's':	// -s <seconds>: fixed-length recording
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%f", &seconds) != 1)
					ParseError(arg, argv[i]);
				break;
			case 'v':	// -v: verbose callback logging
				aqr.verbose = TRUE;
				break;
			default:
				fprintf(stderr, "unknown option: '%s'\n\n", arg);
				usage();
			}
		} else if (recordFileName != NULL) {
			fprintf(stderr, "may only specify one file to record\n\n");
			usage();
		} else
			recordFileName = arg;
	}
	if (recordFileName == NULL) // no record file path provided
		usage();
	
	// determine file format
	AudioFileTypeID audioFileType = kAudioFileCAFType;	// default to CAF
	CFStringRef cfRecordFileName = CFStringCreateWithCString(NULL, recordFileName, kCFStringEncodingUTF8);
	InferAudioFileFormatFromFilename(cfRecordFileName, &audioFileType);
	CFRelease(cfRecordFileName);

	// adapt record format to hardware and apply defaults
	if (recordFormat.mSampleRate == 0.)
		MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);

	if (recordFormat.mChannelsPerFrame == 0)
		recordFormat.mChannelsPerFrame = 2;
	
	if (recordFormat.mFormatID == 0 || recordFormat.mFormatID == kAudioFormatLinearPCM) {
		// default to PCM, 16 bit int
		recordFormat.mFormatID = kAudioFormatLinearPCM;
		recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		recordFormat.mBitsPerChannel = 16;
		// e.g. AIFF requires big-endian samples
		if (MyFileFormatRequiresBigEndian(audioFileType, recordFormat.mBitsPerChannel))
			recordFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
		recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame =
			(recordFormat.mBitsPerChannel / 8) * recordFormat.mChannelsPerFrame;
		recordFormat.mFramesPerPacket = 1;
		recordFormat.mReserved = 0;
	}

	try {
		// create the queue
		XThrowIfError(AudioQueueNewInput(
			&recordFormat,
			MyInputBufferHandler,
			&aqr /* userData */,
			NULL /* run loop */, NULL /* run loop mode */,
			0 /* flags */, &aqr.queue), "AudioQueueNewInput failed");

		// get the record format back from the queue's audio converter --
		// the file may require a more specific stream description than was necessary to create the encoder.
		size = sizeof(recordFormat);
		XThrowIfError(AudioQueueGetProperty(aqr.queue, kAudioConverterCurrentOutputStreamDescription,
			&recordFormat, &size), "couldn't get queue's format");

		// convert recordFileName from C string to CFURL
		url = CFURLCreateFromFileSystemRepresentation(NULL, (Byte *)recordFileName, strlen(recordFileName), FALSE);
		XThrowIfError(!url, "couldn't create record file");
        
		// create the audio file
        err = AudioFileCreateWithURL(url, audioFileType, &recordFormat, kAudioFileFlags_EraseFile,
                                              &aqr.recordFile);
        CFRelease(url); // release first, and then bail out on error
		XThrowIfError(err, "AudioFileCreateWithURL failed");
		

		// copy the cookie first to give the file object as much info as we can about the data going in
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);

		// allocate and enqueue buffers
		bufferByteSize = MyComputeRecordBufferSize(&recordFormat, aqr.queue, 0.5);	// enough bytes for half a second
		for (i = 0; i < kNumberRecordBuffers; ++i) {
			AudioQueueBufferRef buffer;
			XThrowIfError(AudioQueueAllocateBuffer(aqr.queue, bufferByteSize, &buffer),
				"AudioQueueAllocateBuffer failed");
			XThrowIfError(AudioQueueEnqueueBuffer(aqr.queue, buffer, 0, NULL),
				"AudioQueueEnqueueBuffer failed");
		}
		
		// record
		if (seconds > 0) {
			// user requested a fixed-length recording (specified a duration with -s)
			// to time the recording more accurately, watch the queue's IsRunning property
			XThrowIfError(AudioQueueAddPropertyListener(aqr.queue, kAudioQueueProperty_IsRunning,
				MyPropertyListener, &aqr), "AudioQueueAddPropertyListener failed");
			
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			// give the IsRunning notification up to 10 seconds to arrive
			CFAbsoluteTime waitUntil = CFAbsoluteTimeGetCurrent() + 10;

			// wait for the started notification
			while (aqr.queueStartStopTime == 0.) {
				CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.010, FALSE);
				if (CFAbsoluteTimeGetCurrent() >= waitUntil) {
					fprintf(stderr, "Timeout waiting for the queue's IsRunning notification\n");
					// NOTE: jumps forward to the cleanup label below (still inside the try block)
					goto cleanup;
				}
			}
			printf("Recording...\n");
			CFAbsoluteTime stopTime = aqr.queueStartStopTime + seconds;
			CFAbsoluteTime now = CFAbsoluteTimeGetCurrent();
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, stopTime - now, FALSE);
		} else {
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			
			// and wait
			printf("Recording, press <return> to stop:\n");
			getchar();
		}

		// end recording
		printf("* recording done *\n");
		
		aqr.running = FALSE;
		XThrowIfError(AudioQueueStop(aqr.queue, TRUE), "AudioQueueStop failed");
		
		// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);
		
cleanup:
		AudioQueueDispose(aqr.queue, TRUE);
		AudioFileClose(aqr.recordFile);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "MyInputBufferHandler: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return e.mError;
	}
		
	return 0;
}
int	main(int argc, const char *argv[])
{
	MyRecorder recorder = {0};
	AudioStreamBasicDescription recordFormat = {0};
	memset(&recordFormat, 0, sizeof(recordFormat));
	
	// Configure the output data format to be AAC
	recordFormat.mFormatID = kAudioFormatMPEG4AAC;
	recordFormat.mChannelsPerFrame = 2;
	
	// get the sample rate of the default input device
	// we use this to adapt the output data format to match hardware capabilities
	MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
	
	// ProTip: Use the AudioFormat API to trivialize ASBD creation.
	//         input: atleast the mFormatID, however, at this point we already have
	//                mSampleRate, mFormatID, and mChannelsPerFrame
	//         output: the remainder of the ASBD will be filled out as much as possible
	//                 given the information known about the format
	UInt32 propSize = sizeof(recordFormat);
	CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL,
									  &propSize, &recordFormat), "AudioFormatGetProperty failed");
	
	// create a input (recording) queue
	AudioQueueRef queue = {0};
	CheckError(AudioQueueNewInput(&recordFormat, // ASBD
								  MyAQInputCallback, // Callback
								  &recorder, // user data
								  NULL, // run loop
								  NULL, // run loop mode
								  0, // flags (always 0)
								  // &recorder.queue), // output: reference to AudioQueue object
								  &queue),
			   "AudioQueueNewInput failed");
	
	// since the queue is now initilized, we ask it's Audio Converter object
	// for the ASBD it has configured itself with. The file may require a more
	// specific stream description than was necessary to create the audio queue.
	//
	// for example: certain fields in an ASBD cannot possibly be known until it's
	// codec is instantiated (in this case, by the AudioQueue's Audio Converter object)
	UInt32 size = sizeof(recordFormat);
	CheckError(AudioQueueGetProperty(queue, kAudioConverterCurrentOutputStreamDescription,
									 &recordFormat, &size), "couldn't get queue's format");
	
	// create the audio file
	CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("./output.caf"), kCFURLPOSIXPathStyle, false);
	CFShow (myFileURL);
	CheckError(AudioFileCreateWithURL(myFileURL, kAudioFileCAFType, &recordFormat,
									  kAudioFileFlags_EraseFile, &recorder.recordFile), "AudioFileCreateWithURL failed");
	CFRelease(myFileURL);
	
	// many encoded formats require a 'magic cookie'. we set the cookie first
	// to give the file object as much info as we can about the data it will be receiving
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
	// allocate and enqueue buffers
	int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, 0.5);	// enough bytes for half a second
	int bufferIndex;
    for (bufferIndex = 0; bufferIndex < kNumberRecordBuffers; ++bufferIndex)
	{
		AudioQueueBufferRef buffer;
		CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffer),
				   "AudioQueueAllocateBuffer failed");
		CheckError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL),
				   "AudioQueueEnqueueBuffer failed");
	}
	
	// start the queue. this function return immedatly and begins
	// invoking the callback, as needed, asynchronously.
	recorder.running = TRUE;
	CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
	
	// and wait
	printf("Recording, press <return> to stop:\n");
	getchar();
	
	// end recording
	printf("* recording done *\n");
	recorder.running = FALSE;
	CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");
	
	// a codec may update its magic cookie at the end of an encoding session
	// so reapply it to the file now
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
cleanup:
	AudioQueueDispose(queue, TRUE);
	AudioFileClose(recorder.recordFile);
	
	return 0;
}
Example #8
0
void Recorder::start()
{
	AudioQueueNewInput (                                              // 1
						&aqData.mDataFormat,                          // 2
						HandleInputBuffer,                            // 3
						&aqData,                                      // 4
						NULL,                                         // 5
						kCFRunLoopCommonModes,                        // 6
						0,                                            // 7
						&aqData.mQueue                                // 8
	);

	UInt32 dataFormatSize = sizeof (aqData.mDataFormat);       // 1
	 
	AudioQueueGetProperty (                                    // 2
	  aqData.mQueue,                                           // 3
	  kAudioConverterCurrentOutputStreamDescription,           // 4
	  &aqData.mDataFormat,                                     // 5
	  &dataFormatSize                                          // 6
	);
	
	const char *filePath = "recording.wav";
	
	audioFileURL =
		CFURLCreateFromFileSystemRepresentation (            // 1
			NULL,                                            // 2
			(const UInt8 *) filePath,                        // 3
			strlen (filePath),                               // 4
			false                                            // 5
		);
	AudioFileCreateWithURL (                                 // 6
		audioFileURL,                                        // 7
		fileType,                                            // 8
		&aqData.mDataFormat,                                 // 9
		kAudioFileFlags_EraseFile,                           // 10
		&aqData.mAudioFile                                   // 11
	);
	
	DeriveBufferSize (                               // 1
		aqData.mQueue,                               // 2
		aqData.mDataFormat,                          // 3
		0.5,                                         // 4
		&aqData.bufferByteSize                       // 5
	);

	for (int i = 0; i < kNumberBuffers; ++i) {           // 1
		AudioQueueAllocateBuffer (                       // 2
			aqData.mQueue,                               // 3
			aqData.bufferByteSize,                       // 4
			&aqData.mBuffers[i]                          // 5
		);
	 
		AudioQueueEnqueueBuffer (                        // 6
			aqData.mQueue,                               // 7
			aqData.mBuffers[i],                          // 8
			0,                                           // 9
			NULL                                         // 10
		);
	}

	aqData.mCurrentPacket = 0;                           // 1

	aqData.mIsRunning = true;

	AudioQueueStart (                                    // 3
		aqData.mQueue,                                   // 4
		NULL                                             // 5
	);
}