Code example #1
void	CAAudioFileConverter::OpenOutputFile(const CAStreamBasicDescription &srcFormat, const CAStreamBasicDescription &destFormat, FSRef &destFSRef, CAAudioChannelLayout &destFileLayout)
{
	const ConversionParameters &params = mParams;
	
	// output file
	if (params.output.filePath == NULL) {
		GenerateOutputFileName(params.input.filePath, srcFormat,
					destFormat, params.output.fileType, mOutName);
	} else
		strcpy(mOutName, params.output.filePath);
	
	// deal with pre-existing output file
	if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
		XThrowIf(!(params.flags & kOpt_OverwriteOutputFile), 1, "overwrite output file");
			// not allowed to overwrite
		// output file exists - delete it
		XThrowIfError(FSDeleteObject(&destFSRef), "delete output file");
	}
	// get FSRef/CFStringRef for output file
	FSRef outFolderFSRef;
	CFStringRef outFileName;
	XThrowIfError(PosixPathToParentFSRefAndName(mOutName, outFolderFSRef, outFileName), "locate output audio file");
	
	// create the output file
	mDestFile.CreateNew(outFolderFSRef, outFileName, params.output.fileType, destFormat, &destFileLayout.Layout());
	CFRelease(outFileName);
}
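Every example on this page resolves its output destination the same way: PosixPathToParentFSRefAndName splits a POSIX path into a parent-directory FSRef plus a CFStringRef file name, and both are handed to the file-creation call (examples #1 and #4 then release the returned name). Below is a minimal sketch of that shared pattern; it assumes CAAudioFile, CAStreamBasicDescription, XThrowIfError and the path helper come from Apple's CoreAudio PublicUtility sources (the header names are assumed), and CreateAIFFAtPath is an illustrative name, not something that appears in the examples.

#include "CAAudioFile.h"
#include "CAFilePathUtils.h"
#include "CAXException.h"

// Create an empty AIFF file at 'path' with the given data format and no
// channel layout (example #3 shows that a NULL layout is accepted).
static void CreateAIFFAtPath(const char *path, const CAStreamBasicDescription &fmt)
{
	FSRef parentDir;
	CFStringRef fileName;
	XThrowIfError(PosixPathToParentFSRefAndName(path, parentDir, fileName),
			"locate output directory");
	CAAudioFile outFile;
	outFile.CreateNew(parentDir, fileName, kAudioFileAIFFType, fmt, NULL);
	CFRelease(fileName);	// release the returned name, as examples #1 and #4 do
	outFile.Close();
}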
Code example #2
File: afrecord.cpp Project: arnelh/Examples
int main(int argc, const char *argv[])
{
	const char *recordFileName = NULL;

	// set up defaults
	AudioFileTypeID filetype = kAudioFileAIFFType;
	
	bool gotOutDataFormat = false;
	CAStreamBasicDescription dataFormat;
	dataFormat.mSampleRate = 44100.;	// later get this from the hardware
	dataFormat.mFormatID = kAudioFormatLinearPCM;
	dataFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	dataFormat.mFramesPerPacket = 1;
	dataFormat.mChannelsPerFrame = 2;
	dataFormat.mBitsPerChannel = 16;
	dataFormat.mBytesPerPacket = dataFormat.mBytesPerFrame = 4;	// 2 channels * 2 bytes (16-bit samples)
	
	SInt32 bitrate = -1, quality = -1;
	
	// parse arguments
	for (int i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		if (arg[0] != '-') {
			if (recordFileName != NULL) {
				fprintf(stderr, "may only specify one record file\n");
				usage();
			}
			recordFileName = arg;
		} else {
			arg += 1;
			if (arg[0] == 'f' || !strcmp(arg, "-file")) {
				if (++i == argc) MissingArgument();
				filetype = Parse4CharCode(argv[i], "-f | --file");
			} else if (arg[0] == 'd' || !strcmp(arg, "-data")) {
				if (++i == argc) MissingArgument();
				if (!ParseStreamDescription(argv[i], dataFormat))
					usage();
				gotOutDataFormat = true;
			} else if (arg[0] == 'b' || !strcmp(arg, "-bitrate")) {
				if (++i == argc) MissingArgument();
				bitrate = ParseInt(argv[i], "-b | --bitrate");
			} else if (arg[0] == 'q' || !strcmp(arg, "-quality")) {
				if (++i == argc) MissingArgument();
				quality = ParseInt(argv[i], "-q | --quality");
			} else {
				fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
				usage();
			}
		}
	}
	
	if (recordFileName == NULL)
		usage();
	
	if (!gotOutDataFormat) {
		if (filetype == 0) {
			fprintf(stderr, "no output file or data format specified\n\n");
			usage();
		}
		if (!CAAudioFileFormats::Instance()->InferDataFormatFromFileFormat(filetype, dataFormat)) {
			fprintf(stderr, "Couldn't infer data format from file format\n\n");
			usage();
		}
	} else if (filetype == 0) {
		if (!CAAudioFileFormats::Instance()->InferFileFormatFromDataFormat(dataFormat, filetype)) {
			dataFormat.PrintFormat(stderr, "", "Couldn't infer file format from data format");
			usage();
		}
	}

	unlink(recordFileName);
	
	// force a two-channel recording regardless of the format parsed or inferred above
	if (dataFormat.IsPCM())
		dataFormat.ChangeNumberChannels(2, true);
	else
		dataFormat.mChannelsPerFrame = 2;
	
	try {
		const int kNumberBuffers = 3;
		const unsigned kBufferSize = 0x8000;
		CAAudioFileRecorder recorder(kNumberBuffers, kBufferSize);
		FSRef parentDir;
		CFStringRef filename;
		XThrowIfError(PosixPathToParentFSRefAndName(recordFileName, parentDir, filename), "couldn't find output directory");
		recorder.SetFile(parentDir, filename, filetype, dataFormat, NULL);
		
		CAAudioFile &recfile = recorder.GetFile();
		if (bitrate >= 0)
			recfile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &bitrate);
		if (quality >= 0)
			recfile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &quality);

		Record(recorder);
	}
	catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, CAXException::FormatError(buf, e.mError));
		return 1;
	}
	catch (...) {
		fprintf(stderr, "An unknown error occurred\n");
		return 1;
	}
	return 0;
}
Code example #3
void	Interleave(int nInputs, const char *infilenames[], const char *outfilename, const CAAudioChannelLayout *layout)
{
	const UInt32 kBufferSizeFrames = 0x8000;
	const UInt32 kBufferSizeBytes = kBufferSizeFrames * sizeof(Float32);
	class FileAndBuffer : public CAAudioFile {
	public:
		FileAndBuffer() : mBuf(NULL), mPtrs(NULL) { }
		~FileAndBuffer() { delete mBuf; delete mPtrs; }
		
		CABufferList *	mBuf;
		CABufferList *	mPtrs;
	};
	FileAndBuffer *infiles = new FileAndBuffer[nInputs], *file;
	FileAndBuffer outfile;
	int i;
	UInt32 outputChannels = 0;
	double sampleRate = 0.;
	UInt32 maxBitDepth = 0;
	CAStreamBasicDescription clientFormat;
	bool outFileCreated = false;
	
	try {
		// set up input files
		for (i = 0; i < nInputs; ++i) {
			file = &infiles[i];
			file->Open(infilenames[i]);
			const CAStreamBasicDescription &fmt = file->GetFileDataFormat();
			//fmt.PrintFormat(stdout, "", "input file");
			XThrowIf(fmt.mFormatID != kAudioFormatLinearPCM, -1, "input files must be PCM");
			outputChannels += fmt.mChannelsPerFrame;
			if (sampleRate == 0.)
				sampleRate = fmt.mSampleRate;
			else
				XThrowIf(fmt.mSampleRate != sampleRate, -1, "input files must have the same sample rate");
			if (fmt.mBitsPerChannel > maxBitDepth)
				maxBitDepth = fmt.mBitsPerChannel;
			clientFormat.mSampleRate = sampleRate;
			clientFormat.SetCanonical(fmt.mChannelsPerFrame, false);	// deinterleaved
			file->SetClientFormat(clientFormat, NULL);
			file->mBuf = CABufferList::New("readbuf", clientFormat);
			file->mBuf->AllocateBuffers(kBufferSizeBytes);
			file->mPtrs = CABufferList::New("readptrs", clientFormat);
			//clientFormat.PrintFormat(stdout, "", "input client");
		}
		
		if (layout != NULL) {
			if (AudioChannelLayoutTag_GetNumberOfChannels(layout->Tag()) != outputChannels) {
				fprintf(stderr, "Channel layout tag '%s' is inappropriate for %u channels of audio -- aborting\n", 
					CAChannelLayouts::ConstantToString(layout->Tag()), (unsigned)outputChannels);
				exit(2);
			}
		}

		// prepare output file format
		CAStreamBasicDescription outfmt;
		outfmt.mSampleRate = sampleRate;
		outfmt.mFormatID = kAudioFormatLinearPCM;
		outfmt.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		outfmt.mBitsPerChannel = maxBitDepth;
		outfmt.mChannelsPerFrame = outputChannels;
		outfmt.mBytesPerPacket = outfmt.mBytesPerFrame = outputChannels * (maxBitDepth >> 3);
		outfmt.mFramesPerPacket = 1;
		//outfmt.PrintFormat(stdout, "", "output file");
		
		unlink(outfilename);
		FSRef parentDir;
		CFStringRef outName;
		XThrowIfError(PosixPathToParentFSRefAndName(outfilename, parentDir, outName), "Couldn't locate output directory");
		outfile.CreateNew(parentDir, outName, kAudioFileAIFFType, outfmt, layout ? &layout->Layout() : NULL);
		outFileCreated = true;
		
		// set up the output client format and write buffers (the file itself was created above)
		clientFormat.mSampleRate = sampleRate;
		clientFormat.SetCanonical(outputChannels, false);
		outfile.SetClientFormat(clientFormat, NULL);
		//clientFormat.PrintFormat(stdout, "", "output client");

		outfile.mPtrs = CABufferList::New("writeptrs", clientFormat);

		AudioBufferList &writebufs = outfile.mPtrs->GetModifiableBufferList();
		
		// main loop: read a buffer from each input, gather their deinterleaved channel
		// buffers into the output AudioBufferList, and write the interleaved result;
		// short reads are padded with zeroes so every channel keeps the same length
		while (true) {
			UInt32 maxFramesRead = 0;
			UInt32 nframes;
			int outbuf = 0;
			for (i = 0; i < nInputs; ++i) {
				file = &infiles[i];
				file->mPtrs->SetFrom(file->mBuf);
				nframes = kBufferSizeFrames;
				AudioBufferList &readbufs = file->mPtrs->GetModifiableBufferList();
				file->Read(nframes, &readbufs);
				//CAShowAudioBufferList(&readbufs, 8, 0);
				if (nframes > maxFramesRead)
					maxFramesRead = nframes;
				if (nframes < kBufferSizeFrames)
					file->mPtrs->PadWithZeroes(kBufferSizeBytes);

				memcpy(&writebufs.mBuffers[outbuf], &readbufs.mBuffers[0], 
					readbufs.mNumberBuffers * sizeof(AudioBuffer));
				outbuf += readbufs.mNumberBuffers;
			}
			if (maxFramesRead == 0)
				break;

			if (maxFramesRead < kBufferSizeFrames)
				outfile.mPtrs->SetNumBytes(maxFramesRead * sizeof(Float32));
			//CAShowAudioBufferList(&writebufs, 8, 0);
			outfile.Write(maxFramesRead, &writebufs);
			if (maxFramesRead < kBufferSizeFrames)
				break;
		}
	}
	catch (...) {
		if (outFileCreated)
			unlink(outfilename);
		delete[] infiles;
		throw;
	}
	outfile.Close();
	// input files are closed from destructors
	delete[] infiles;
}
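A minimal call sketch for the routine above (hypothetical file names; two mono inputs would yield a two-channel AIFF, and passing NULL means no channel layout is written, which the code above permits):

// inside some caller:
const char *inputs[] = { "left.aif", "right.aif" };	// hypothetical mono input files
Interleave(2, inputs, "interleaved.aif", NULL);	// a NULL layout is accepted by the check above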
Code example #4
File: main.cpp Project: fruitsamples/SimpleSDK
void WriteOutputFile (const char*	outputFilePath, 
					OSType			dataFormat, 
					Float64			srate, 
					MusicTimeStamp	sequenceLength, 
					bool			shouldPrint,
					AUGraph			inGraph,
					UInt32			numFrames,
					MusicPlayer		player)
{
		// delete existing output file
	TestFile (outputFilePath, true);
	OSStatus result = 0;
	UInt32 size;

	CAStreamBasicDescription outputFormat;
	outputFormat.mChannelsPerFrame = 2;
	outputFormat.mSampleRate = srate;
	outputFormat.mFormatID = dataFormat;
	
	AudioFileTypeID destFileType;
	CAAudioFileFormats::Instance()->InferFileFormatFromFilename (outputFilePath, destFileType);
	
	if (dataFormat == kAudioFormatLinearPCM) {
		outputFormat.mBytesPerPacket = outputFormat.mChannelsPerFrame * 2;
		outputFormat.mFramesPerPacket = 1;
		outputFormat.mBytesPerFrame = outputFormat.mBytesPerPacket;
		outputFormat.mBitsPerChannel = 16;
		
		if (destFileType == kAudioFileWAVEType)
			outputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
								| kLinearPCMFormatFlagIsPacked;
		else
			outputFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian
								| kLinearPCMFormatFlagIsSignedInteger
								| kLinearPCMFormatFlagIsPacked;
	} else {
		// use AudioFormat API to fill out the rest.
		size = sizeof(outputFormat);
		require_noerr (result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat), fail);
	}

	if (shouldPrint) {
		printf ("Writing to file: %s with format:\n* ", outputFilePath);
		outputFormat.Print();
	}
	
	FSRef parentDir;
	CFStringRef destFileName;
	require_noerr (result = PosixPathToParentFSRefAndName(outputFilePath, parentDir, destFileName), fail);

	ExtAudioFileRef outfile;
	result = ExtAudioFileCreateNew (&parentDir, destFileName, destFileType, &outputFormat, NULL, &outfile);
	CFRelease (destFileName);
	require_noerr (result, fail);

	AudioUnit outputUnit;	
	UInt32 nodeCount;
	require_noerr (result = AUGraphGetNodeCount (inGraph, &nodeCount), fail);
	
	// walk the graph's nodes to find its output unit, which is the unit rendered from below
	for (UInt32 i = 0; i < nodeCount; ++i)
	{
		AUNode node;
		require_noerr (result = AUGraphGetIndNode(inGraph, i, &node), fail);

		ComponentDescription desc;
		require_noerr (result = AUGraphNodeInfo(inGraph, node, &desc, NULL), fail);
		
		if (desc.componentType == kAudioUnitType_Output) 
		{
			require_noerr (result = AUGraphNodeInfo(inGraph, node, 0, &outputUnit), fail);
			break;
		}
	}

	{
		CAStreamBasicDescription clientFormat;
		size = sizeof(clientFormat);
		require_noerr (result = AudioUnitGetProperty (outputUnit,
													kAudioUnitProperty_StreamFormat,
													kAudioUnitScope_Output, 0,
													&clientFormat, &size), fail);
		size = sizeof(clientFormat);
		require_noerr (result = ExtAudioFileSetProperty(outfile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), fail);
		
		{
			MusicTimeStamp currentTime;
			AUOutputBL outputBuffer (clientFormat, numFrames);
			AudioTimeStamp tStamp;
			memset (&tStamp, 0, sizeof(AudioTimeStamp));
			tStamp.mFlags = kAudioTimeStampSampleTimeValid;
			int i = 0;
			int numTimesFor10Secs = (int)(10. / (numFrames / srate));
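			// render numFrames at a time from the output unit, appending each slice to the
			// file until the sequence has played to its end; progress is printed roughly
			// every ten seconds of rendered audio when shouldPrint is set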
			do {
				outputBuffer.Prepare();
				AudioUnitRenderActionFlags actionFlags = 0;
				require_noerr (result = AudioUnitRender (outputUnit, &actionFlags, &tStamp, 0, numFrames, outputBuffer.ABL()), fail);

				tStamp.mSampleTime += numFrames;
				
				require_noerr (result = ExtAudioFileWrite(outfile, numFrames, outputBuffer.ABL()), fail);	

				require_noerr (result = MusicPlayerGetTime (player, &currentTime), fail);
				if (shouldPrint && (++i % numTimesFor10Secs == 0))
					printf ("current time: %6.2f beats\n", currentTime);
			} while (currentTime < sequenceLength);
		}
	}
	
// close
	ExtAudioFileDispose(outfile);

	return;

fail:
	printf ("Problem: %ld\n", result); 
	exit(1);
}
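Note how the conversion is delegated to ExtAudioFile in the routine above: the output unit's stream format is fetched with kAudioUnitProperty_StreamFormat and installed on the file as kExtAudioFileProperty_ClientDataFormat, so every buffer produced by AudioUnitRender can be passed straight to ExtAudioFileWrite and is converted to the destination file format on the way to disk.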
Code example #5
File: auprocess.cpp Project: arnelh/Examples
int main(int argc, const char * argv[])
{
	setbuf (stdout, NULL);


#if TARGET_OS_MAC
	{
		thread_extended_policy_data_t		theFixedPolicy;
		theFixedPolicy.timeshare = false;	// set to true for a non-fixed thread
		thread_policy_set(pthread_mach_thread_np(pthread_self()), 
													THREAD_EXTENDED_POLICY, 
													(thread_policy_t)&theFixedPolicy, 
													THREAD_EXTENDED_POLICY_COUNT);

		// We keep a reference to the spawning thread's priority around (initialized in the constructor), 
		// and set the importance of the child thread relative to the spawning thread's priority.
		thread_precedence_policy_data_t		thePrecedencePolicy;
		
		thePrecedencePolicy.importance = 63 - 36;
		thread_policy_set(pthread_mach_thread_np(pthread_self()), 
													THREAD_PRECEDENCE_POLICY, 
													(thread_policy_t)&thePrecedencePolicy, 
													THREAD_PRECEDENCE_POLICY_COUNT);
	}
#endif


// These are the variables that are set up from the input parsing
	char* srcFilePath = NULL;
	char* destFilePath = NULL;
	char* auPresetFile = NULL;
	bool shortMemoryProfile = false;
	OSType manu, subType, type = 0;
	int userSetFrames = -1;
	
	for (int i = 1; i < argc; ++i)
	{
		if (strcmp (argv[i], "-au") == 0) {
            if ( (i + 3) < argc ) {                
                StrToOSType (argv[i + 1], type);
                StrToOSType (argv[i + 2], subType);
                StrToOSType (argv[i + 3], manu);
				i += 3;
			} else {
				printf ("Which Audio Unit:\n%s", usageStr);
				exit(1);
			}
		}
		else if (strcmp (argv[i], "-i") == 0) {
			srcFilePath = const_cast<char*>(argv[++i]);
		}
		else if (strcmp (argv[i], "-o") == 0) {
			destFilePath = const_cast<char*>(argv[++i]);
		}
		else if (strcmp (argv[i], "-p") == 0) {
			auPresetFile = const_cast<char*>(argv[++i]);
		}
		else if (strcmp (argv[i], "-m") == 0) {
			shortMemoryProfile = true;
		}
		else if (strcmp (argv[i], "-f") == 0) {
			sscanf(argv[++i], "%d", &userSetFrames);
		}
		else {
			printf ("%s\n", usageStr);
			exit(1);
		}
	}
	
	if (!type || !srcFilePath) {
		printf ("%s\n", usageStr);
		exit(1);
	}
	if (!destFilePath) {
		if (!shortMemoryProfile) {
			printf ("%s\n", usageStr);
			exit(1);
		}
	}
			// delete pre-existing output file
	if (!shortMemoryProfile) {
		FSRef destFSRef;
		if (FSPathMakeRef((UInt8 *)destFilePath, &destFSRef, NULL) == noErr) {
			// output file exists - delete it
			if (FSDeleteObject(&destFSRef)) {
				printf ("Cannot Delete Output File\n");
				exit(1);
			}
		}
	}
	
	CAComponentDescription desc(type, subType, manu);
	
	CFPropertyListRef presetDict = ReadPresetFromPresetFile(auPresetFile);
	
		// the num of frames to use when processing the file with the Render call
	UInt32 maxFramesToUse = shortMemoryProfile ? 512 : 32768;

		// override the default if a frame count was set on the command line
	if (userSetFrames > 0) {
		maxFramesToUse = userSetFrames; 
	}
		
		// in some settings (for instance a delay with 100% feedback) tail time is essentially infinite
		// so you should safeguard the final OL render stage (post process) which is aimed at pulling the tail through
		// if you want to bypass this completely, just set this to zero.
	Float64 maxTailTimeSecs = 10.;
	
#pragma mark -
#pragma mark __ The driving code
#pragma mark -

	try 
	{
		CAComponent comp(desc);
			
			 // CAAUProcessor's constructor throws... so make sure the component is valid
		if (comp.IsValid() == false) {
			printf ("Can't Find Component\n");
			desc.Print();
			exit(1);
		}
			
		CAAUProcessor processor(comp);
													processor.AU().Print();
		
		CAAudioFile srcFile;
		CAAudioFile destFile; 
		
		srcFile.Open(srcFilePath);

		CAStreamBasicDescription procFormat (srcFile.GetFileDataFormat());
		procFormat.SetCanonical (srcFile.GetFileDataFormat().NumberChannels(), false);

													printf ("Processing Format:\n\t");
													procFormat.Print();
		
		
		if (!shortMemoryProfile) {
			FSRef parentDir;
			CFStringRef filename;
			PosixPathToParentFSRefAndName(destFilePath, parentDir, filename);
			destFile.CreateNew (parentDir, filename, 'AIFF', srcFile.GetFileDataFormat());
			destFile.SetClientFormat (procFormat);
		}
	
		srcFile.SetClientFormat (procFormat);
		
		AUOutputBL outputList(procFormat);

		ReadBuffer* readBuf = NULL;	

#if !CAAF_USE_EXTAUDIOFILE
		UInt64 numInputSamples = srcFile.GetNumberPackets();
#else
		UInt64 numInputSamples = srcFile.GetNumberFrames();
#endif
	
		if (shortMemoryProfile) {
			readBuf = new ReadBuffer;
			readBuf->readData = new AUOutputBL(procFormat);
			readBuf->readFrames = 0;
			UInt32 numFrames = UInt32(procFormat.mSampleRate / 2);
			readBuf->readData->Allocate (numFrames); // half a second of audio data
			readBuf->readData->Prepare(); // half a second of audio data
				
				// read 1/2 second of audio into this read buffer
			srcFile.Read (numFrames, readBuf->readData->ABL());
			
			sInputCallback.inputProc = MemoryInputCallback;
			sInputCallback.inputProcRefCon = readBuf;
			numInputSamples = numFrames;
		}
		else {
			if (desc.IsFConv()) {
				maxFramesToUse = userSetFrames == -1 ? 512 : maxFramesToUse; 
				// some format converters can call you several times at small granularities,
				// so you can't render with a large buffer or you won't return all of the input data;
				// this also lessens the final difference between what you should get and what you do get.
				// converter units *really* should have offline AU versions to handle this for you.
				sInputCallback.inputProc = FConvInputCallback;
			} else
				sInputCallback.inputProc = InputCallback;
			
			sInputCallback.inputProcRefCon = &srcFile;
		}
				
		OSStatus result;
		require_noerr (result = processor.EstablishInputCallback (sInputCallback), home);
		require_noerr (result = processor.SetMaxFramesPerRender (maxFramesToUse), home); 
		processor.SetMaxTailTime (maxTailTimeSecs);
		require_noerr (result = processor.Initialize (procFormat, numInputSamples), home);
		if (presetDict) {
			require_noerr (result = processor.SetAUPreset (presetDict), home);
			CFRelease (presetDict);
		}
			// this does ALL of the preflighting... it could be specialised for an OfflineAU type
			// to do this piecemeal and drive a progress bar using the OfflineAUPreflight method
		require_noerr (result = processor.Preflight (), home);
		
		bool isDone; isDone = false;
		bool needsPostProcessing;
		bool isSilence;
		UInt32 numFrames; numFrames = processor.MaxFramesPerRender();

#if CA_AU_PROFILE_TIME
		sReadTime = 0;
		sRenderTime = 0;
#endif
					
PRINT_MARKS();
			// this is the render loop
		while (!isDone) 
		{
											#if CA_AU_PROFILE_TIME 
												UInt64 now = CAHostTimeBase::GetTheCurrentTime(); 
											#endif
			outputList.Prepare(); // have to do this every time...
			require_noerr (result = processor.Render (outputList.ABL(), numFrames, isSilence, &isDone,
											&needsPostProcessing), home);
											#if CA_AU_PROFILE_TIME 
												sRenderTime += (CAHostTimeBase::GetTheCurrentTime() - now);
											#endif

if (!shortMemoryProfile)
	PRINT_PROGRESS(processor.GetOLPercentComplete());
else
	PRINT_PROGRESS(((processor.SampleTime() / numInputSamples) * 100.));
	
			if (numFrames && !shortMemoryProfile)
				destFile.Write (numFrames, outputList.ABL());
		}
			
			// this is the postprocessing if needed
		if (!shortMemoryProfile && needsPostProcessing) 
		{
			isDone = false;
			numFrames = processor.MaxFramesPerRender();
			while (!isDone) {
				outputList.Prepare(); // have to do this every time...
											#if CA_AU_PROFILE_TIME 
												UInt64 now = CAHostTimeBase::GetTheCurrentTime(); 
											#endif
				require_noerr (result = processor.PostProcess (outputList.ABL(), numFrames, 
													isSilence, isDone), home);
											#if CA_AU_PROFILE_TIME 
												sRenderTime += (CAHostTimeBase::GetTheCurrentTime() - now); 
											#endif

PRINT_PROGRESS(processor.GetOLPercentComplete());

				if (numFrames && !shortMemoryProfile)
					destFile.Write (numFrames, outputList.ABL());
			}
		}

printf ("\n");

home:
		if (result) {
			printf ("Exit with bad result:%ld\n", result);
			exit(result);
		}
		
		if (readBuf) {
			delete readBuf->readData;
			delete readBuf;
		}
					
#if CA_AU_PROFILE_TIME
	if (!shortMemoryProfile) {
			// this flushes any remaining data to be written to the disk.
			// the source file is closed in its destructor of course
		destFile.Close(); 
			// open the file again, to get stats about it for profiling
		destFile.Open(destFilePath);
	}

	SInt64 numWritten;
	if (shortMemoryProfile)
		numWritten = 0;
	else {
#if !CAAF_USE_EXTAUDIOFILE
		numWritten = destFile.GetNumberPackets();
#else
		numWritten = destFile.GetNumberFrames();
#endif
	}

	printf ("Read File Time:%.2f secs for %lld packets (%.1f secs), wrote %lld packets\n", 
						(CAHostTimeBase::ConvertToNanos (sReadTime) / 1.0e9),
						numInputSamples,
						(numInputSamples / procFormat.mSampleRate),
						numWritten);

	if (!shortMemoryProfile) 
	{
#if !CAAF_USE_EXTAUDIOFILE
		UInt64 numOutputSamples = destFile.GetNumberPackets();
#else
		UInt64 numOutputSamples = destFile.GetNumberFrames();
#endif
	
		if (numOutputSamples == numInputSamples) {
			printf ("\tWrote the same number of packets as read\n");
		} else {
			bool expectationMet = !desc.IsOffline(); // we don't have any expectations for offline AU's
			if (processor.LatencySampleCount() || processor.TailSampleCount()) {
				if (numOutputSamples - numInputSamples == processor.TailSampleCount())
					expectationMet = true;
				if (expectationMet)	
					printf ("Correctly wrote \'Read Size + Tail\'. ");
				printf ("AU reports (samples): %ld latency, %ld tail\n", 
										processor.LatencySampleCount(), processor.TailSampleCount());
			}
			if (expectationMet == false) 
			{
				if (numOutputSamples > numInputSamples) {
					printf ("\tWrote %lld packets (%.2f secs) more than read\n", 
								(numOutputSamples - numInputSamples), 
								((numOutputSamples - numInputSamples) / procFormat.mSampleRate));
				} else {
					printf ("\tRead %lld packets (%.2f secs) more than wrote\n", 
								(numInputSamples - numOutputSamples), 
								((numInputSamples - numOutputSamples) / procFormat.mSampleRate));
				}
			}
		}
	}
	
	Float64 renderTimeSecs = CAHostTimeBase::ConvertToNanos (sRenderTime - sReadTime) / 1.0e9;
	printf ("Total Render Time:%.2f secs, using render slice size of %ld frames\n", 
							renderTimeSecs, maxFramesToUse);
	
	Float64 cpuUsage;
	if (shortMemoryProfile)
		cpuUsage = (renderTimeSecs / 0.5) * 100.;
	else
		cpuUsage = (renderTimeSecs / (numInputSamples / procFormat.mSampleRate)) * 100.;
	printf ("CPU Usage for Render Time:%.2f%%\n", cpuUsage);

	CFStringRef str = comp.GetCompName();
	UInt32 compNameLen = CFStringGetLength (str);
	
	CFStringRef presetName = NULL;
	if (auPresetFile) {
		CFPropertyListRef dict;
		if (processor.AU().GetAUPreset (dict) == noErr) {
			presetName = (CFStringRef)CFDictionaryGetValue((CFDictionaryRef)dict, CFSTR("name"));
			CFRelease (dict);
		}
	}

	UInt32 presetLen = presetName ? CFStringGetLength(presetName) : 0;

	char* cstr = (char*)malloc (compNameLen + presetLen + 2 + 1);
	CFStringGetCString (str, cstr, (CFStringGetLength (str) + 1), kCFStringEncodingASCII);
	if (presetName) {
		cstr[compNameLen] = ':';
		cstr[compNameLen+1] = ':';
		CFStringGetCString (presetName, cstr + compNameLen + 2, (CFStringGetLength (presetName) + 1), kCFStringEncodingASCII);
	}
	PerfResult("AudioUnitProcess", EndianU32_NtoB(comp.Desc().componentSubType), cstr, cpuUsage, "%realtime");
	free (cstr);
#endif


	}
	catch (CAXException &e) {
		char buf[256];
		printf("Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
		exit(1);
	}
	catch (...) {
		printf("An unknown error occurred\n");
		exit(1);
	}
			
	return 0;
}
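Taken together, the CAAUProcessor flow in this last example is: EstablishInputCallback to feed input from the source file (or the preloaded half-second memory buffer), SetMaxFramesPerRender and SetMaxTailTime to bound each render slice and the tail pull, Initialize with the processing format and input length, Preflight, then a Render loop that writes each slice to the destination file, followed by a PostProcess loop when Render reports that post-processing is needed.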