Example No. 1
// _______________________________________________________________________________________
//
void	CAAudioFile::Write(UInt32 numPackets, const AudioBufferList *data)
{
	if (mIOBufferList.mBuffers[0].mData == NULL) {
#if DEBUG
		printf("warning: CAAudioFile::AllocateBuffers called from WritePackets\n");
#endif
		AllocateBuffers();
	}

	if (mMode == kPreparingToWrite)
		mMode = kWriting;
	else
		XThrowIf(mMode != kWriting, kExtAudioFileError_InvalidOperationOrder, "can't write to this file");
	if (mConverter != NULL) {
		mWritePackets = numPackets;
		mWriteBufferList->SetFrom(data);
		WritePacketsFromCallback(WriteInputProc, this);
	} else {
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, data->mBuffers[0].mDataByteSize,
						NULL, mPacketMark, &numPackets, data->mBuffers[0].mData),
						"write audio file");
		ElapsedTime(this, write, mTicksInIO);
#if VERBOSE_IO
		printf("CAAudioFile::WritePackets: wrote %ld packets at %qd, %ld bytes\n", numPackets, mPacketMark, data->mBuffers[0].mDataByteSize);
#endif
		//mNumberPackets =
		mPacketMark += numPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numPackets * mFileDataFormat.mFramesPerPacket;
		// else: shouldn't happen since we're only called when there's no converter
	}
}
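Every example on this page centers on AudioFileWritePackets. For reference, its declaration from AudioToolbox's AudioFile.h is shown below: the third argument is a byte count, the packet descriptions are only required for VBR data, inStartingPacket is a packet index, and ioNumPackets returns how many packets were actually written.

#include <AudioToolbox/AudioFile.h>

extern OSStatus AudioFileWritePackets(AudioFileID                          inAudioFile,
                                      Boolean                              inUseCache,
                                      UInt32                               inNumBytes,
                                      const AudioStreamPacketDescription  *inPacketDescriptions,
                                      SInt64                               inStartingPacket,
                                      UInt32                              *ioNumPackets,
                                      const void                          *inBuffer);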
Example No. 2
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
static void MyInputBufferHandler(	void *                          inUserData,
									AudioQueueRef                   inAQ,
									AudioQueueBufferRef             inBuffer,
									const AudioTimeStamp *          inStartTime,
									UInt32							inNumPackets,
									const AudioStreamPacketDescription *inPacketDesc)
{
	MyRecorder *aqr = (MyRecorder *)inUserData;
	
	if (aqr->verbose) {
		printf("buf data %p, 0x%x bytes, 0x%x packets\n", inBuffer->mAudioData,
			(int)inBuffer->mAudioDataByteSize, (int)inNumPackets);
	}
	
	if (inNumPackets > 0) {
		// write packets to file
		CheckError(AudioFileWritePackets(aqr->recordFile, FALSE, inBuffer->mAudioDataByteSize,
			inPacketDesc, aqr->recordPacket, &inNumPackets, inBuffer->mAudioData),
			"AudioFileWritePackets failed");
		aqr->recordPacket += inNumPackets;
	}

	// if we're not stopping, re-enqueue the buffer so that it gets filled again
	if (aqr->running)
		CheckError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
}
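For context, a callback like MyInputBufferHandler is installed when the input queue is created. Below is a minimal setup sketch, not part of the sample: it assumes the MyRecorder struct and CheckError helper used above, and the 16-bit stereo PCM format is an illustrative choice.

MyRecorder recorder = {0};

AudioStreamBasicDescription fmt = {0};           // illustrative recording format
fmt.mSampleRate       = 44100.0;
fmt.mFormatID         = kAudioFormatLinearPCM;
fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
fmt.mChannelsPerFrame = 2;
fmt.mBitsPerChannel   = 16;
fmt.mBytesPerFrame    = fmt.mBytesPerPacket = 4; // 2 channels * 2 bytes per sample
fmt.mFramesPerPacket  = 1;

AudioQueueRef queue = NULL;
CheckError(AudioQueueNewInput(&fmt, MyInputBufferHandler, &recorder,
                              NULL, kCFRunLoopCommonModes, 0, &queue),
           "AudioQueueNewInput failed");
// ...create recorder.recordFile, allocate and enqueue buffers, set recorder.running,
// then AudioQueueStart(queue, NULL) to begin delivering buffers to the callback.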
Example No. 3
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
static void MyInputBufferHandler(	void *                          inUserData,
									AudioQueueRef                   inAQ,
									AudioQueueBufferRef             inBuffer,
									const AudioTimeStamp *          inStartTime,
									UInt32							inNumPackets,
									const AudioStreamPacketDescription *inPacketDesc)
{
	MyRecorder *aqr = (MyRecorder *)inUserData;

	try {
		if (aqr->verbose) {
			printf("buf data %p, 0x%x bytes, 0x%x packets\n", inBuffer->mAudioData,
				(int)inBuffer->mAudioDataByteSize, (int)inNumPackets);
		}
		
		if (inNumPackets > 0) {
			// write packets to file
			XThrowIfError(AudioFileWritePackets(aqr->recordFile, FALSE, inBuffer->mAudioDataByteSize,
				inPacketDesc, aqr->recordPacket, &inNumPackets, inBuffer->mAudioData),
				"AudioFileWritePackets failed");
			aqr->recordPacket += inNumPackets;
		}

		// if we're not stopping, re-enqueue the buffer so that it gets filled again
		if (aqr->running)
			XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
	} 
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "MyInputBufferHandler: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}	
}
// Audio Queue callback function, called when an input buffer has been filled.
static void MyAQInputCallback(void *inUserData, AudioQueueRef inQueue,
							  AudioQueueBufferRef inBuffer,
							  const AudioTimeStamp *inStartTime,
							  UInt32 inNumPackets,
							  const AudioStreamPacketDescription *inPacketDesc)
{
	MyRecorder *recorder = (MyRecorder *)inUserData;
	
	// if inNumPackets is greater than zero, our buffer contains audio data
	// in the format we specified (AAC)
	if (inNumPackets > 0)
	{
		// write packets to file
		CheckError(AudioFileWritePackets(recorder->recordFile, FALSE, inBuffer->mAudioDataByteSize,
										 inPacketDesc, recorder->recordPacket, &inNumPackets, 
										 inBuffer->mAudioData), "AudioFileWritePackets failed");
		// increment packet index
		recorder->recordPacket += inNumPackets;
	}
	
	// if we're not stopping, re-enqueue the buffer so that it gets filled again
	if (recorder->running)
		CheckError(AudioQueueEnqueueBuffer(inQueue, inBuffer,
										   0, NULL), "AudioQueueEnqueueBuffer failed");
}
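The control-side counterpart of the recorder callbacks above might look like the sketch below; it assumes a MyRecorder pointer and the AudioQueueRef are in scope, and only running and recordFile come from fields the callback actually uses.

// Hypothetical stop sequence, run from the thread that owns the queue:
recorder->running = FALSE;                 // the callback stops re-enqueueing buffers
CheckError(AudioQueueStop(queue, TRUE),    // TRUE = stop synchronously, right away
           "AudioQueueStop failed");
AudioQueueDispose(queue, TRUE);            // releases the queue and its buffers
AudioFileClose(recorder->recordFile);      // close the file so its header is finalized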
Example No. 5
void	HLAudioFile::WriteAudioFrames(SInt64 inOffset, UInt32& ioNumberFrames, void* inData, bool inCache)
{
	ThrowIf(mAudioFileID == 0, CAException(fnOpnErr), "HLAudioFile::WriteAudioFrames: file isn't prepared");
	
	UInt32 theNumberBytesToWrite = ioNumberFrames * mFormat.mBytesPerFrame;
	OSStatus theError = AudioFileWritePackets(mAudioFileID, inCache, theNumberBytesToWrite, NULL, inOffset, &ioNumberFrames, inData);
	ThrowIfError(theError, CAException(theError), "HLAudioFile::WriteAudioFrames: couldn't write the data");
}
void Convert(MyAudioConverterSettings *mySettings)
{	
	
	UInt32 outputBufferSize = 32 * 1024; // 32 KB is a good starting point
	UInt32 sizePerPacket = mySettings->outputFormat.mBytesPerPacket;	
	UInt32 packetsPerBuffer = outputBufferSize / sizePerPacket;
	
	// allocate destination buffer
	UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize);
	
	UInt32 outputFilePacketPosition = 0; //in bytes
	while(1)
	{
		// wrap the destination buffer in an AudioBufferList
		AudioBufferList convertedData;
		convertedData.mNumberBuffers = 1;
		convertedData.mBuffers[0].mNumberChannels = mySettings->outputFormat.mChannelsPerFrame;
		convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
		convertedData.mBuffers[0].mData = outputBuffer;
		
		UInt32 frameCount = packetsPerBuffer;
		
		// read from the extaudiofile
		CheckResult(ExtAudioFileRead(mySettings->inputFile,
									 &frameCount,
									 &convertedData),
					"Couldn't read from input file");
		
		if (frameCount == 0) {
			printf("done reading from file\n");
			free(outputBuffer);		// release the destination buffer before returning
			return;
		}
		
		// write the converted data to the output file
		CheckResult (AudioFileWritePackets(mySettings->outputFile,
										   FALSE,
										   frameCount,
										   NULL,
										   outputFilePacketPosition / mySettings->outputFormat.mBytesPerPacket, 
										   &frameCount,
										   convertedData.mBuffers[0].mData),
					 "Couldn't write packets to file");
		
		// advance the output file write location
		outputFilePacketPosition += (frameCount * mySettings->outputFormat.mBytesPerPacket);
	}
	
	// AudioConverterDispose(audioConverter);
}
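A hedged sketch of how the MyAudioConverterSettings fields used by this Convert might be prepared before it is called; ExtAudioFileOpenURL, ExtAudioFileSetProperty, and AudioFileCreateWithURL are standard calls, but the file paths and the 16-bit stereo PCM output format are illustrative assumptions.

MyAudioConverterSettings settings = {0};

CFURLRef inputURL  = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("input.m4a"),
                                                   kCFURLPOSIXPathStyle, false);
CFURLRef outputURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("output.caf"),
                                                   kCFURLPOSIXPathStyle, false);

// open the source through ExtAudioFile so it decodes to PCM for us
CheckResult(ExtAudioFileOpenURL(inputURL, &settings.inputFile), "ExtAudioFileOpenURL failed");

// the PCM format we want to receive from ExtAudioFileRead and write with AudioFileWritePackets
settings.outputFormat.mSampleRate       = 44100.0;
settings.outputFormat.mFormatID         = kAudioFormatLinearPCM;
settings.outputFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
settings.outputFormat.mChannelsPerFrame = 2;
settings.outputFormat.mBitsPerChannel   = 16;
settings.outputFormat.mBytesPerFrame    = settings.outputFormat.mBytesPerPacket = 4;
settings.outputFormat.mFramesPerPacket  = 1;

CheckResult(AudioFileCreateWithURL(outputURL, kAudioFileCAFType, &settings.outputFormat,
                                   kAudioFileFlags_EraseFile, &settings.outputFile),
            "AudioFileCreateWithURL failed");

// ask ExtAudioFile to hand us data in that same client format
CheckResult(ExtAudioFileSetProperty(settings.inputFile, kExtAudioFileProperty_ClientDataFormat,
                                    sizeof(settings.outputFormat), &settings.outputFormat),
            "Couldn't set client data format");

Convert(&settings);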
Example No. 7
// _______________________________________________________________________________________
//
void	CAAudioFile::WritePacketsFromCallback(
								AudioConverterComplexInputDataProc	inInputDataProc,
								void *								inInputDataProcUserData)
{
	while (true) {
		// keep writing until we exhaust the input (temporary stop), or produce no output (EOF)
		UInt32 numEncodedPackets = mIOBufferSizePackets;
		mIOBufferList.mBuffers[0].mDataByteSize = mIOBufferSizeBytes;
#if CAAUDIOFILE_PROFILE
		mInConverter = true;
#endif
		StartTiming(this, fill);
		OSStatus err = AudioConverterFillComplexBuffer(mConverter, inInputDataProc, inInputDataProcUserData,
					&numEncodedPackets, &mIOBufferList, mPacketDescs);
		ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
		mInConverter = false;
#endif
		XThrowIf(err != 0 && err != kNoMoreInputRightNow, err, "convert audio packets (write)");
		if (numEncodedPackets == 0)
			break;
		Byte *buf = (Byte *)mIOBufferList.mBuffers[0].mData;
#if VERBOSE_IO
		printf("CAAudioFile::WritePacketsFromCallback: wrote %ld packets, %ld bytes\n", numEncodedPackets, mIOBufferList.mBuffers[0].mDataByteSize);
		if (mPacketDescs) {
			for (UInt32 i = 0; i < numEncodedPackets; ++i) {
				printf("  write packet %qd : offset %qd, length %ld\n", mPacketMark + i, mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#if VERBOSE_IO >= 2
				hexdump(buf + mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#endif
			}
		}
#endif
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, mIOBufferList.mBuffers[0].mDataByteSize, mPacketDescs, mPacketMark, &numEncodedPackets, buf), "write audio file");
		ElapsedTime(this, write, mTicksInIO);
		mPacketMark += numEncodedPackets;
		//mNumberPackets += numEncodedPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numEncodedPackets * mFileDataFormat.mFramesPerPacket;
		else {
			for (UInt32 i = 0; i < numEncodedPackets; ++i)
				mFrameMark += mPacketDescs[i].mVariableFramesInPacket;
		}
		if (err == kNoMoreInputRightNow)
			break;
	}
}
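WritePacketsFromCallback drives AudioConverterFillComplexBuffer with the caller-supplied input proc (WriteInputProc in Example No. 1). For reference, such a proc must match AudioToolbox's AudioConverterComplexInputDataProc shape:

#include <AudioToolbox/AudioConverter.h>

typedef OSStatus (*AudioConverterComplexInputDataProc)(
            AudioConverterRef                inAudioConverter,
            UInt32                          *ioNumberDataPackets,      // in: packets requested, out: packets supplied
            AudioBufferList                 *ioData,                   // point at the caller's source buffers
            AudioStreamPacketDescription   **outDataPacketDescription, // packet descriptions for VBR input, if any
            void                            *inUserData);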
Example No. 8
void SoundRecorder::handleInputBuffer(AudioQueueRef inAQ,AudioQueueBufferRef inBuffer,const AudioTimeStamp* inStartTime,UInt32 inNumPackets,const AudioStreamPacketDescription* inPacketDesc)
	{
	/* Calculate the number of packets in the buffer if not given: */
	if(inNumPackets==0&&format.mBytesPerPacket!=0)
		inNumPackets=inBuffer->mAudioDataByteSize/format.mBytesPerPacket;
	
	/* Write the just-filled buffer to the audio file: */
	if(AudioFileWritePackets(audioFile,false,inBuffer->mAudioDataByteSize,inPacketDesc,numRecordedPackets,&inNumPackets,inBuffer->mAudioData)==noErr)
		{
		/* Update the number of written packets: */
		numRecordedPackets+=inNumPackets;
		}
	
	/* Put the just-filled buffer back into the audio queue if we're still recording: */
	if(active)
		AudioQueueEnqueueBuffer(queue,inBuffer,0,0);
	}
void DiskOutUGenInternal::writeBuffer(const int bufferSize) throw()
{
	UInt32 ioNumPackets = bufferSize;
	OSStatus status = AudioFileWritePackets(audioFile_, false,
											allocatedBlockSizeInBytes, NULL,
											currentPacket,&ioNumPackets,
											audioData);
	
	if((status != noErr) || (ioNumPackets == 0))
	{
		printf("DiskOut: error: writing file\n");
		AudioFileClose(audioFile_);
		audioFile_ = 0;
	}
	else
	{
		currentPacket += ioNumPackets;
	}	
}
Example No. 10
void HandleInputBuffer (
    void                                 *aqData,
    AudioQueueRef                        inAQ,
    AudioQueueBufferRef                  inBuffer,
    const AudioTimeStamp                 *inStartTime,
    UInt32                               inNumPackets,
    const AudioStreamPacketDescription   *inPacketDesc
) {
    AQRecorderState *pAqData = (AQRecorderState *) aqData;               // 1
 
    if (inNumPackets == 0 &&                                             // 2
          pAqData->mDataFormat.mBytesPerPacket != 0)
       inNumPackets =
           inBuffer->mAudioDataByteSize / pAqData->mDataFormat.mBytesPerPacket;
 
    if (AudioFileWritePackets (                                          // 3
            pAqData->mAudioFile,
            false,
            inBuffer->mAudioDataByteSize,
            inPacketDesc,
            pAqData->mCurrentPacket,
            &inNumPackets,
            inBuffer->mAudioData
        ) == noErr) {
            pAqData->mCurrentPacket += inNumPackets;}                    // 4
 
   if (pAqData->mIsRunning == 0)                                         // 5
      return;
 
    AudioQueueEnqueueBuffer (                                            // 6
        pAqData->mQueue,
        inBuffer,
        0,
        NULL
    );
}
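Before HandleInputBuffer can fire, the queue in pAqData->mQueue needs buffers allocated and enqueued, and the queue must be started. A sketch of that step follows; the AQRecorderState fields match those referenced in the callback, while the buffer count and byte size are illustrative assumptions (real code derives the size from the data format).

#define kNumberRecordBuffers 3              // illustrative buffer count

AQRecorderState aqData = {0};
// ...create aqData.mQueue with AudioQueueNewInput, fill in aqData.mDataFormat,
// and create aqData.mAudioFile before this point...

UInt32 bufferByteSize = 64 * 1024;          // illustrative; derive from mDataFormat in real code
for (int i = 0; i < kNumberRecordBuffers; ++i) {
    AudioQueueBufferRef buffer;
    AudioQueueAllocateBuffer(aqData.mQueue, bufferByteSize, &buffer);
    AudioQueueEnqueueBuffer(aqData.mQueue, buffer, 0, NULL);
}

aqData.mCurrentPacket = 0;
aqData.mIsRunning = true;
AudioQueueStart(aqData.mQueue, NULL);       // NULL start time = start as soon as possible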
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM, create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jive with
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
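A possible call site for DoConvertFile; the paths are placeholders, and passing 0 for the sample rate keeps the source rate while 0 for the bit rate lets the function fall back to its 192 kbps default (see above).

CFURLRef srcURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("input.caf"),
                                                kCFURLPOSIXPathStyle, false);
CFURLRef dstURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("output.caf"),
                                                kCFURLPOSIXPathStyle, false);

OSStatus err = DoConvertFile(srcURL, dstURL, kAudioFormatMPEG4AAC, 0 /* sample rate */, 0 /* bit rate */);

CFRelease(srcURL);
CFRelease(dstURL);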
void Convert(MyAudioConverterSettings *mySettings)
{
	// create audioConverter object
	AudioConverterRef	audioConverter;
    CheckResult (AudioConverterNew(&mySettings->inputFormat, &mySettings->outputFormat, &audioConverter),
				 "AudioConveterNew failed");
	
	// allocate packet descriptions if the input file is VBR
	UInt32 packetsPerBuffer = 0;
	UInt32 outputBufferSize = 32 * 1024; // 32 KB is a good starting point
	UInt32 sizePerPacket = mySettings->inputFormat.mBytesPerPacket;	
	if (sizePerPacket == 0)
	{
		UInt32 size = sizeof(sizePerPacket);
        CheckResult(AudioConverterGetProperty(audioConverter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &sizePerPacket),
					"Couldn't get kAudioConverterPropertyMaximumOutputPacketSize");
		
        // make sure the buffer is large enough to hold at least one packet
		if (sizePerPacket > outputBufferSize)
			outputBufferSize = sizePerPacket;
		
		packetsPerBuffer = outputBufferSize / sizePerPacket;
		mySettings->inputFilePacketDescriptions = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * packetsPerBuffer);
		
	}
	else
	{
		packetsPerBuffer = outputBufferSize / sizePerPacket;
	}
	
	// allocate destination buffer
	UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize); // CHRIS: not sizeof(UInt8*). check book text!
	
	UInt32 outputFilePacketPosition = 0; //in bytes
	while(1)
	{
		// wrap the destination buffer in an AudioBufferList
		AudioBufferList convertedData;
		convertedData.mNumberBuffers = 1;
		convertedData.mBuffers[0].mNumberChannels = mySettings->inputFormat.mChannelsPerFrame;
		convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
		convertedData.mBuffers[0].mData = outputBuffer;
		
		// now call the audioConverter to transcode the data. This function will call
		// the callback function as many times as required to fulfill the request.
		UInt32 ioOutputDataPackets = packetsPerBuffer;
		OSStatus error = AudioConverterFillComplexBuffer(audioConverter, 
														 MyAudioConverterCallback, 
														 mySettings, 
														 &ioOutputDataPackets, 
														 &convertedData, 
														 (mySettings->inputFilePacketDescriptions ? mySettings->inputFilePacketDescriptions : nil));
		if (error || !ioOutputDataPackets)
		{
			//		fprintf(stderr, "err: %ld, packets: %ld\n", err, ioOutputDataPackets);
			break;	// this is our termination condition
		}
		
		// write the converted data to the output file
		// KEVIN: QUESTION: 3rd arg seems like it should be a byte count, not packets. why does this work?
		CheckResult (AudioFileWritePackets(mySettings->outputFile,
										   FALSE,
										   ioOutputDataPackets,
										   NULL,
										   outputFilePacketPosition / mySettings->outputFormat.mBytesPerPacket, 
										   &ioOutputDataPackets,
										   convertedData.mBuffers[0].mData),
					 "Couldn't write packets to file");
		
		// advance the output file write location
		outputFilePacketPosition += (ioOutputDataPackets * mySettings->outputFormat.mBytesPerPacket);
	}
	
	AudioConverterDispose(audioConverter);
}