Example #1
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
static void MyInputBufferHandler(	void *                          inUserData,
									AudioQueueRef                   inAQ,
									AudioQueueBufferRef             inBuffer,
									const AudioTimeStamp *          inStartTime,
									UInt32							inNumPackets,
									const AudioStreamPacketDescription *inPacketDesc)
{
	MyRecorder *aqr = (MyRecorder *)inUserData;

	try {
		if (aqr->verbose) {
			printf("buf data %p, 0x%x bytes, 0x%x packets\n", inBuffer->mAudioData,
				(int)inBuffer->mAudioDataByteSize, (int)inNumPackets);
		}
		
		if (inNumPackets > 0) {
			// write packets to file
			XThrowIfError(AudioFileWritePackets(aqr->recordFile, FALSE, inBuffer->mAudioDataByteSize,
				inPacketDesc, aqr->recordPacket, &inNumPackets, inBuffer->mAudioData),
				"AudioFileWritePackets failed");
			aqr->recordPacket += inNumPackets;
		}

		// if we're not stopping, re-enqueue the buffer so that it gets filled again
		if (aqr->running)
			XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
	} 
	catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "MyInputBufferHandler: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}	
}
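For context, a minimal sketch of how a callback like this is wired up with Audio Queue Services. The MyRecorder layout and the setup function are assumptions inferred from the fields the handler uses, not part of the original example:

// Hypothetical setup sketch: create an input queue that drives MyInputBufferHandler.
// MyRecorder's layout here is inferred from the fields used above.
struct MyRecorder {
    AudioFileID recordFile;
    SInt64      recordPacket;
    bool        running;
    bool        verbose;
};

static void StartRecordingSketch(const AudioStreamBasicDescription &format, MyRecorder &aqr)
{
    AudioQueueRef queue;
    XThrowIfError(AudioQueueNewInput(&format, MyInputBufferHandler, &aqr,
        NULL /* run loop */, NULL /* run loop mode */, 0, &queue), "AudioQueueNewInput failed");
    // buffers must be allocated and enqueued before starting;
    // see the sizing helper and usage sketch in Example #10 below
    aqr.running = true;
    XThrowIfError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
}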
Example #2
void	CAAudioFileConverter::OpenOutputFile(const CAStreamBasicDescription &srcFormat, const CAStreamBasicDescription &destFormat, FSRef &destFSRef, CAAudioChannelLayout &destFileLayout)
{
	const ConversionParameters &params = mParams;
	
	// output file
	if (params.output.filePath == NULL) {
		GenerateOutputFileName(params.input.filePath, srcFormat,
					destFormat, params.output.fileType, mOutName);
	} else
		strcpy(mOutName, params.output.filePath);
	
	// deal with pre-existing output file
	if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
		XThrowIf(!(params.flags & kOpt_OverwriteOutputFile), 1, "overwrite output file");
			// not allowed to overwrite
		// output file exists - delete it
		XThrowIfError(FSDeleteObject(&destFSRef), "delete output file");
	}
	// get FSRef/CFStringRef for output file
	FSRef outFolderFSRef;
	CFStringRef outFileName;
	XThrowIfError(PosixPathToParentFSRefAndName(mOutName, outFolderFSRef, outFileName), "locate output audio file");
	
	// create the output file
	mDestFile.CreateNew(outFolderFSRef, outFileName, params.output.fileType, destFormat, &destFileLayout.Layout());
	CFRelease(outFileName);
}
Example #3
void	GetFormatFromInputFile (AudioFileID inputFile, CAStreamBasicDescription & inputFormat)
{
	bool doPrint = true;
	UInt32 size;
	XThrowIfError(AudioFileGetPropertyInfo(inputFile,
                                           kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
	UInt32 numFormats = size / sizeof(AudioFormatListItem);
	AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];
    
	XThrowIfError(AudioFileGetProperty(inputFile,
                                       kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
	numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
	if (numFormats == 1) {
        // this is the common case
		inputFormat = formatList[0].mASBD;
	} else {
		if (doPrint) {
			printf ("File has a %d layered data format:\n", (int)numFormats);
			for (unsigned int i = 0; i < numFormats; ++i)
				CAStreamBasicDescription(formatList[i].mASBD).Print();
			printf("\n");
		}
		// now we should look to see which decoders we have on the system
		XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder IDs");
		UInt32 numDecoders = size / sizeof(OSType);
		OSType *decoderIDs = new OSType [ numDecoders ];
		XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder IDs");
		unsigned int i = 0;
		for (; i < numFormats; ++i) {
			OSType decoderID = formatList[i].mASBD.mFormatID;
			bool found = false;
			for (unsigned int j = 0; j < numDecoders; ++j) {
				if (decoderID == decoderIDs[j]) {
					found = true;
					break;
				}
			}
			if (found) break;
		}
		delete [] decoderIDs;
		
		if (i >= numFormats) {
			fprintf (stderr, "Cannot play any of the formats in this file\n");
			throw kAudioFileUnsupportedDataFormatError;
		}
		inputFormat = formatList[i].mASBD;
	}
	delete [] formatList;
}
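A possible call site for this helper, assuming the file is opened with AudioFileOpenURL as other examples here do; the file name is a placeholder:

// Hypothetical usage sketch for GetFormatFromInputFile.
const char *path = "input.m4a";    // placeholder path
CFURLRef url = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault,
    (const UInt8 *)path, strlen(path), false);
AudioFileID inputFile;
XThrowIfError(AudioFileOpenURL(url, kAudioFileReadPermission, 0, &inputFile), "AudioFileOpenURL failed");
CFRelease(url);

CAStreamBasicDescription inputFormat;
GetFormatFromInputFile(inputFile, inputFormat);    // throws if no installed decoder can handle the file
inputFormat.Print();
AudioFileClose(inputFile);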
Example #4
// _______________________________________________________________________________________
//
// call for existing file, NOT new one - from Open() or Wrap()
void	CAAudioFile::GetExistingFileInfo()
{
	LOG_FUNCTION("CAAudioFile::GetExistingFileInfo", "%p", this);
	UInt32 propertySize;
	OSStatus err;

	// get mFileDataFormat
	propertySize = sizeof(AudioStreamBasicDescription);
	XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyDataFormat, &propertySize, &mFileDataFormat), "get audio file's data format");

	// get mFileChannelLayout
	err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, &propertySize, NULL);
	if (err == noErr && propertySize > 0) {
		AudioChannelLayout *layout = static_cast<AudioChannelLayout *>(malloc(propertySize));
		err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyChannelLayout, &propertySize, layout);
		if (err == noErr) {
			mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
			printf("existing file's channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
		}
		free(layout);
		XThrowIfError(err, "get audio file's channel layout");
	}
	if (mMode != kReading)
		return;

#if 0
	// get mNumberPackets
	propertySize = sizeof(mNumberPackets);
	XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyAudioDataPacketCount, &propertySize, &mNumberPackets), "get audio file's packet count");
#if VERBOSE_IO
	printf("CAAudioFile::GetExistingFileInfo: %qd packets\n", mNumberPackets);
#endif
#endif

	// get mMagicCookie
	err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
	if (err == noErr && propertySize > 0) {
		mMagicCookie = new Byte[propertySize];
		mMagicCookieSize = propertySize;
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyMagicCookieData, &propertySize, mMagicCookie), "get audio file's magic cookie");
	}
	InitFileMaxPacketSize();
	mPacketMark = 0;
	mFrameMark = 0;

	UpdateClientMaxPacketSize();
}
Example #5
// _______________________________________________________________________________________
//
CFArrayRef  CAAudioFile::GetConverterConfig()
{
	CFArrayRef plist;
	UInt32 propertySize = sizeof(plist);
	XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterPropertySettings, &propertySize, &plist), "get converter property settings");
	return plist;
}
Example #6
// _______________________________________________________________________________________
//
void	CAAudioFile::FlushEncoder()
{
	if (mConverter != NULL) {
		mFinishingEncoding = true;
		WritePacketsFromCallback(WriteInputProc, this);
		mFinishingEncoding = false;

		// get priming info from converter, set it on the file
		if (mFileDataFormat.mBitsPerChannel == 0) {
			UInt32 propertySize;
			OSStatus err;
			AudioConverterPrimeInfo primeInfo;
			propertySize = sizeof(primeInfo);

			err = AudioConverterGetProperty(mConverter, kAudioConverterPrimeInfo, &propertySize, &primeInfo);
			if (err == noErr) {
				AudioFilePacketTableInfo pti;
				propertySize = sizeof(pti);
				err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
				if (err == noErr) {
//printf("old packet table info: %qd valid, %ld priming, %ld remainder\n", pti.mNumberValidFrames, pti.mPrimingFrames, pti.mRemainderFrames);
					UInt64 totalFrames = pti.mNumberValidFrames + pti.mPrimingFrames + pti.mRemainderFrames;
					pti.mPrimingFrames = primeInfo.leadingFrames;
					pti.mRemainderFrames = primeInfo.trailingFrames;
					pti.mNumberValidFrames = totalFrames - pti.mPrimingFrames - pti.mRemainderFrames;
//printf("new packet table info: %qd valid, %ld priming, %ld remainder\n", pti.mNumberValidFrames, pti.mPrimingFrames, pti.mRemainderFrames);
					XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, sizeof(pti), &pti), "couldn't set packet table info on audio file");
				}
			}
		}
	}
}
Example #7
// _______________________________________________________________________________________
//
void	CAAudioFile::Write(UInt32 numPackets, const AudioBufferList *data)
{
	if (mIOBufferList.mBuffers[0].mData == NULL) {
#if DEBUG
		printf("warning: CAAudioFile::AllocateBuffers called from WritePackets\n");
#endif
		AllocateBuffers();
	}

	if (mMode == kPreparingToWrite)
		mMode = kWriting;
	else
		XThrowIf(mMode != kWriting, kExtAudioFileError_InvalidOperationOrder, "can't write to this file");
	if (mConverter != NULL) {
		mWritePackets = numPackets;
		mWriteBufferList->SetFrom(data);
		WritePacketsFromCallback(WriteInputProc, this);
	} else {
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, data->mBuffers[0].mDataByteSize,
						NULL, mPacketMark, &numPackets, data->mBuffers[0].mData),
						"write audio file");
		ElapsedTime(this, write, mTicksInIO);
#if VERBOSE_IO
		printf("CAAudioFile::WritePackets: wrote %ld packets at %qd, %ld bytes\n", numPackets, mPacketMark, data->mBuffers[0].mDataByteSize);
#endif
		//mNumberPackets =
		mPacketMark += numPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numPackets * mFileDataFormat.mFramesPerPacket;
		// else: shouldn't happen since we're only called when there's no converter
	}
}
Example #8
// _______________________________________________________________________________________
//
void	CAAudioFile::UpdateClientMaxPacketSize()
{
	LOG_FUNCTION("CAAudioFile::UpdateClientMaxPacketSize", "%p", this);
	mFrame0Offset = 0;
	if (mConverter != NULL) {
		AudioConverterPropertyID property = (mMode == kReading) ?
			kAudioConverterPropertyMaximumOutputPacketSize :
			kAudioConverterPropertyMaximumInputPacketSize;

		UInt32 propertySize = sizeof(UInt32);
		XThrowIfError(AudioConverterGetProperty(mConverter, property, &propertySize, &mClientMaxPacketSize),
			"get audio converter's maximum packet size");

		if (mFileDataFormat.mBitsPerChannel == 0) {
			AudioConverterPrimeInfo primeInfo;
			propertySize = sizeof(primeInfo);
			OSStatus err = AudioConverterGetProperty(mConverter, kAudioConverterPrimeInfo, &propertySize, &primeInfo);
			if (err == noErr)
				mFrame0Offset = primeInfo.leadingFrames;
#if VERBOSE_CONVERTER
			printf("kAudioConverterPrimeInfo: err = %ld, leadingFrames = %ld\n", err, mFrame0Offset);
#endif
		}
	} else {
		mClientMaxPacketSize = mFileMaxPacketSize;
	}
}
Example #9
// ____________________________________________________________________________________
// Copy a queue's encoder's magic cookie to an audio file.
static void MyCopyEncoderCookieToFile(AudioQueueRef theQueue, AudioFileID theFile)
{
	OSStatus err;
	UInt32 propertySize;
	
	// get the magic cookie, if any, from the converter		
	err = AudioQueueGetPropertySize(theQueue, kAudioConverterCompressionMagicCookie, &propertySize);
	
	if (err == noErr && propertySize > 0) {
		// there is valid cookie data to be fetched;  get it
		Byte *magicCookie = (Byte *)malloc(propertySize);
		try {
			XThrowIfError(AudioQueueGetProperty(theQueue, kAudioConverterCompressionMagicCookie, magicCookie,
				&propertySize), "get audio converter's magic cookie");
			// now set the magic cookie on the output file
            // even though some formats have cookies, some files don't take them, so we ignore the error
			/*err =*/ AudioFileSetProperty(theFile, kAudioFilePropertyMagicCookieData, propertySize, magicCookie);
		} 
		catch (CAXException &e) {
			char buf[256];
			fprintf(stderr, "MyCopyEncoderCookieToFile: %s (%s)\n", e.mOperation, e.FormatError(buf));
		}
        catch (...) {
            fprintf(stderr, "MyCopyEncoderCookieToFile: Unexpected exception\n");
        }
		free(magicCookie);
	}
}
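Because some codecs update their magic cookie at the end of encoding (see the comment to that effect in DoConvertFile further below), a helper like this is typically called twice; a short sketch of the assumed call sites:

// Hypothetical call sites: once up front, once after stopping the queue.
MyCopyEncoderCookieToFile(queue, recordFile);    // when recording starts
// ... record ...
XThrowIfError(AudioQueueStop(queue, true), "AudioQueueStop failed");
MyCopyEncoderCookieToFile(queue, recordFile);    // again, in case the codec updated the cookie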
Example #10
// ____________________________________________________________________________________
// Determine the size, in bytes, of a buffer necessary to represent the supplied number
// of seconds of audio data.
static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format, AudioQueueRef queue, float seconds)
{
	int packets, frames, bytes;
	
	frames = (int)ceil(seconds * format->mSampleRate);
	
	if (format->mBytesPerFrame > 0)
		bytes = frames * format->mBytesPerFrame;
	else {
		UInt32 maxPacketSize;
		if (format->mBytesPerPacket > 0)
			maxPacketSize = format->mBytesPerPacket;	// constant packet size
		else {
			UInt32 propertySize = sizeof(maxPacketSize); 
			XThrowIfError(AudioQueueGetProperty(queue, kAudioConverterPropertyMaximumOutputPacketSize, &maxPacketSize,
				&propertySize), "couldn't get queue's maximum output packet size");
		}
		if (format->mFramesPerPacket > 0)
			packets = frames / format->mFramesPerPacket;
		else
			packets = frames;	// worst-case scenario: 1 frame in a packet
		if (packets == 0)		// sanity check
			packets = 1;
		bytes = packets * maxPacketSize;
	}
	return bytes;
}
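A sketch of how the computed size would feed buffer allocation; three buffers of half a second each is an arbitrary choice for illustration:

// Hypothetical usage: size the queue's buffers for 0.5 seconds of audio each.
int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, 0.5f);
for (int i = 0; i < 3; ++i) {
    AudioQueueBufferRef buffer;
    XThrowIfError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffer), "AudioQueueAllocateBuffer failed");
    XThrowIfError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
}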
Example #11
void	CAAudioFileConverter::ReadCAFInfo()
{
	FSRef fsref;
	AudioFileID afid = 0;
	CAFSourceInfo info;
	UInt32 size;
	OSStatus err;
	
	try {
		XThrowIfError(FSPathMakeRef((UInt8 *)mParams.input.filePath, &fsref, NULL), "couldn't locate input file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdPerm, 0, &afid), "couldn't open input file");
		size = sizeof(AudioFileTypeID);
		XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
		if (info.filetype == kAudioFileCAFType) {
			size = sizeof(info);
			err = AudioFileGetUserData(afid, 'srcI', 0, &size, &info);
			if (!err) {
				// restore the following from the original file info:
				//	filetype
				//	data format
				//	filename
				AudioStreamBasicDescription destfmt;
				ASBD_BtoN((AudioStreamBasicDescription *)info.asbd, &destfmt);
				mParams.output.dataFormat = destfmt;
				mParams.output.fileType = EndianU32_BtoN(info.filetype);
				if (mParams.output.filePath == NULL) {
					int len = strlen(mParams.input.filePath) + strlen(info.filename) + 2;
					char *newname = (char *)malloc(len);	// $$$ leaked
					
					const char *dir = dirname(mParams.input.filePath);
					if (dir && (dir[0] != '.' && dir[1] != '/'))
						sprintf(newname, "%s/%s", dir, info.filename);
					else
						strcpy(newname, info.filename);
					mParams.output.filePath = newname;
					mParams.flags = (mParams.flags & ~kOpt_OverwriteOutputFile) | kOpt_NoSanitizeOutputFormat;
				}
			}
		}
		AudioFileClose(afid);
	}
	catch (...) {
		if (afid)
			AudioFileClose(afid);
		throw;
	}
}
Example #12
// _______________________________________________________________________________________
//
SInt64  CAAudioFile::FileDataOffset()
{
	if (mFileDataOffset < 0) {
		UInt32 propertySize = sizeof(SInt64);
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyDataOffset, &propertySize, &mFileDataOffset), "couldn't get file's data offset");
	}
	return mFileDataOffset;
}
Example #13
File: main.cpp Project: ebakan/SMS
int main (int argc, char * const argv[]) 
{
    char inputFile[]="blip.mp3";

    static const double threshold=0.50;
    int hardware=macbookpro;
    double x,y,z,prev_x,prev_y,prev_z;

    AudioFileID audioFile;

    CFURLRef theURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (UInt8*)inputFile, strlen(inputFile), false);

    
    XThrowIfError (AudioFileOpenURL (theURL, kAudioFileReadPermission, 0, &audioFile), "AudioFileOpenURL");

    // get the number of channels of the file
    CAStreamBasicDescription fileFormat;
    UInt32 propsize = sizeof(CAStreamBasicDescription);
    XThrowIfError (AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &propsize, &fileFormat), "AudioFileGetProperty");

// let's set up our playing state now
    AUGraph theGraph;
    CAAudioUnit fileAU;

// this makes the graph, the file AU and sets it all up for playing
    MakeSimpleGraph (theGraph, fileAU, fileFormat, audioFile);
	    

// now we load the file contents up for playback before we start playing
// this has to be done after the AU is initialized and any time it is reset or uninitialized
    Float64 fileDuration = PrepareFileAU (fileAU, fileFormat, audioFile);
    printf ("file duration: %f secs\n", fileDuration);

    read_sms_real(hardware,&x,&y,&z);
    prev_x=x;
    prev_y=y;
    prev_z=z;
    for(;;) {
	read_sms_real(hardware,&x,&y,&z);
	//printf("x: %f y: %f z: %f\n",x,y,z);
	if(isDelta(threshold,x,y,z,prev_x,prev_y,prev_z))
	    XThrowIfError (AUGraphStart (theGraph), "AUGraphStart");
	prev_x=x;
	prev_y=y;
	prev_z=z;
    }

// note: the loop above never exits, so the cleanup below is unreachable as written
// sleep until the file is finished
    //usleep ((int)(fileDuration * 1000. * 1000.));

// let's clean up
    XThrowIfError (AUGraphStop (theGraph), "AUGraphStop");
    XThrowIfError (AUGraphUninitialize (theGraph), "AUGraphUninitialize");
    XThrowIfError (AudioFileClose (audioFile), "AudioFileClose");
    XThrowIfError (AUGraphClose (theGraph), "AUGraphClose");
    
    return 0;
}	
Example #14
// _______________________________________________________________________________________
//
void	CAAudioFile::Open(const FSRef &fsref)
{
	LOG_FUNCTION("CAAudioFile::Open", "%p", this);
	XThrowIf(mMode != kClosed, kExtAudioFileError_InvalidOperationOrder, "file already open");
	mFSRef = fsref;
	XThrowIfError(AudioFileOpen(&mFSRef, fsRdPerm, 0, &mAudioFile), "open audio file");
	mOwnOpenFile = true;
	mMode = kReading;
	GetExistingFileInfo();
}
Example #15
void	CAAudioFileConverter::WriteCAFInfo()
{
	FSRef fsref;
	AudioFileID afid = 0;
	CAFSourceInfo info;
	UInt32 size;
	
	try {
		XThrowIfError(FSPathMakeRef((UInt8 *)mParams.input.filePath, &fsref, NULL), "couldn't locate input file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdPerm, 0, &afid), "couldn't open input file");
		size = sizeof(AudioFileTypeID);
		XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
		AudioFileClose(afid);
		afid = 0;
		
		XThrowIfError(FSPathMakeRef((UInt8 *)mOutName, &fsref, NULL), "couldn't locate output file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdWrPerm, 0, &afid), "couldn't open output file");
		const char *srcFilename = strrchr(mParams.input.filePath, '/');
		if (srcFilename++ == NULL) srcFilename = mParams.input.filePath;
		ASBD_NtoB(&mSrcFormat, (AudioStreamBasicDescription *)info.asbd);
		int namelen = std::min(kMaxFilename-1, (int)strlen(srcFilename));
		memcpy(info.filename, srcFilename, namelen);
		info.filename[namelen++] = 0;
		info.filetype = EndianU32_NtoB(info.filetype);
		
		XThrowIfError(AudioFileSetUserData(afid, 'srcI', 0, offsetof(CAFSourceInfo, filename) + namelen, &info), "couldn't set CAF file's source info chunk");
		AudioFileClose(afid);
	}
	catch (...) {
		if (afid)
			AudioFileClose(afid);
		throw;
	}
}
Example #16
File: main.cpp Project: ebakan/SMS
double PrepareFileAU (CAAudioUnit &au, CAStreamBasicDescription &fileFormat, AudioFileID audioFile)
{	
// 
		// calculate the duration
	UInt64 nPackets;
	UInt32 propsize = sizeof(nPackets);
	XThrowIfError (AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataPacketCount, &propsize, &nPackets), "kAudioFilePropertyAudioDataPacketCount");
		
	Float64 fileDuration = (nPackets * fileFormat.mFramesPerPacket) / fileFormat.mSampleRate;

	ScheduledAudioFileRegion rgn;
	memset (&rgn.mTimeStamp, 0, sizeof(rgn.mTimeStamp));
	rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
	rgn.mTimeStamp.mSampleTime = 0;
	rgn.mCompletionProc = NULL;
	rgn.mCompletionProcUserData = NULL;
	rgn.mAudioFile = audioFile;
	rgn.mLoopCount = 1;
	rgn.mStartFrame = 0;
	rgn.mFramesToPlay = UInt32(nPackets * fileFormat.mFramesPerPacket);
		
		// tell the file player AU to play all of the file
	XThrowIfError (au.SetProperty (kAudioUnitProperty_ScheduledFileRegion, 
			kAudioUnitScope_Global, 0,&rgn, sizeof(rgn)), "kAudioUnitProperty_ScheduledFileRegion");
	
		// prime the fp AU with default values
	UInt32 defaultVal = 0;
	XThrowIfError (au.SetProperty (kAudioUnitProperty_ScheduledFilePrime, 
			kAudioUnitScope_Global, 0, &defaultVal, sizeof(defaultVal)), "kAudioUnitProperty_ScheduledFilePrime");

		// tell the fp AU when to start playing (this ts is in the AU's render time stamps; -1 means next render cycle)
	AudioTimeStamp startTime;
	memset (&startTime, 0, sizeof(startTime));
	startTime.mFlags = kAudioTimeStampSampleTimeValid;
	startTime.mSampleTime = -1;
	XThrowIfError (au.SetProperty(kAudioUnitProperty_ScheduleStartTimeStamp, 
			kAudioUnitScope_Global, 0, &startTime, sizeof(startTime)), "kAudioUnitProperty_ScheduleStartTimeStamp");

	return fileDuration;
}
Example #17
// _______________________________________________________________________________________
//
void	CAAudioFile::InitFileMaxPacketSize()
{
	LOG_FUNCTION("CAAudioFile::InitFileMaxPacketSize", "%p", this);
	UInt32 propertySize = sizeof(UInt32);
	OSStatus err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyMaximumPacketSize,
		&propertySize, &mFileMaxPacketSize);
	if (err) {
		// workaround for 3361377: not all file formats' maximum packet sizes are supported
		if (!mFileDataFormat.IsPCM())
			XThrowIfError(err, "get audio file's maximum packet size");
		mFileMaxPacketSize = mFileDataFormat.mBytesPerFrame;
	}
	AllocateBuffers(true /* okToFail */);
}
Example #18
// _______________________________________________________________________________________
//
void	CAAudioFile::WritePacketsFromCallback(
								AudioConverterComplexInputDataProc	inInputDataProc,
								void *								inInputDataProcUserData)
{
	while (true) {
		// keep writing until we exhaust the input (temporary stop), or produce no output (EOF)
		UInt32 numEncodedPackets = mIOBufferSizePackets;
		mIOBufferList.mBuffers[0].mDataByteSize = mIOBufferSizeBytes;
#if CAAUDIOFILE_PROFILE
		mInConverter = true;
#endif
		StartTiming(this, fill);
		OSStatus err = AudioConverterFillComplexBuffer(mConverter, inInputDataProc, inInputDataProcUserData,
					&numEncodedPackets, &mIOBufferList, mPacketDescs);
		ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
		mInConverter = false;
#endif
		XThrowIf(err != 0 && err != kNoMoreInputRightNow, err, "convert audio packets (write)");
		if (numEncodedPackets == 0)
			break;
		Byte *buf = (Byte *)mIOBufferList.mBuffers[0].mData;
#if VERBOSE_IO
		printf("CAAudioFile::WritePacketsFromCallback: wrote %ld packets, %ld bytes\n", numEncodedPackets, mIOBufferList.mBuffers[0].mDataByteSize);
		if (mPacketDescs) {
			for (UInt32 i = 0; i < numEncodedPackets; ++i) {
				printf("  write packet %qd : offset %qd, length %ld\n", mPacketMark + i, mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#if VERBOSE_IO >= 2
				hexdump(buf + mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#endif
			}
		}
#endif
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, mIOBufferList.mBuffers[0].mDataByteSize, mPacketDescs, mPacketMark, &numEncodedPackets, buf), "write audio file");
		ElapsedTime(this, write, mTicksInIO);
		mPacketMark += numEncodedPackets;
		//mNumberPackets += numEncodedPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numEncodedPackets * mFileDataFormat.mFramesPerPacket;
		else {
			for (UInt32 i = 0; i < numEncodedPackets; ++i)
				mFrameMark += mPacketDescs[i].mVariableFramesInPacket;
		}
		if (err == kNoMoreInputRightNow)
			break;
	}
}
Example #19
int SetupRemoteIO (AudioUnit& inRemoteIOUnit, Float64 sampleRate, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat)
{
	try {
		// Open the output unit
		AudioComponentDescription desc;
		desc.componentType = kAudioUnitType_Output;
		desc.componentSubType = kAudioUnitSubType_RemoteIO;
		desc.componentManufacturer = kAudioUnitManufacturer_Apple;
		desc.componentFlags = 0;
		desc.componentFlagsMask = 0;
		
		AudioComponent comp = AudioComponentFindNext(NULL, &desc);
		
		XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit");
        
        UInt32 zero = 0;
		UInt32 one = 1;
        //enable input
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "couldn't enable input on the remote I/O unit");
        //disable output
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &zero, sizeof(zero)), "couldn't disable output ");
        
        //set input callback
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 1, &inRenderProc, sizeof(inRenderProc)), "couldn't set remote i/o input callback");
        
		
        // set our required format - LPCM non-interleaved 32 bit floating point
        // (note: this local declaration shadows the outFormat reference parameter)
        AudioStreamBasicDescription outFormat;

        outFormat.mSampleRate = sampleRate;
        outFormat.mFormatID = kAudioFormatLinearPCM;
        outFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
        outFormat.mFramesPerPacket = 1;
        outFormat.mBytesPerPacket= 4;
        outFormat.mChannelsPerFrame = 1;
        outFormat.mBitsPerChannel = 32;
        outFormat.mBytesPerFrame = 4;
        
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's output client format");
        
		XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit");
	}
	catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return 1;
	}
	catch (...) {
		fprintf(stderr, "An unknown error occurred\n");
		return 1;
	}
	
	return 0;
}
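A sketch of how this setup function might be driven; the callback body and sample rate are placeholders:

// Hypothetical driver for SetupRemoteIO.
static OSStatus MyInputProc(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
    const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
    AudioBufferList *ioData)
{
    // placeholder: pull the captured samples with AudioUnitRender and process them
    return noErr;
}

void StartInputSketch()
{
    AudioUnit rioUnit;
    CAStreamBasicDescription format;
    AURenderCallbackStruct inputProc = { MyInputProc, NULL /* refCon */ };
    if (SetupRemoteIO(rioUnit, 44100.0, inputProc, format) == 0)
        XThrowIfError(AudioOutputUnitStart(rioUnit), "AudioOutputUnitStart failed");
}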
Example #20
SInt64	CAAudioFile::FrameToPacket(SInt64 inFrame) const
{
	AudioFramePacketTranslation trans;
	UInt32 propertySize;

	switch (mFileDataFormat.mFramesPerPacket) {
	case 1:
		return inFrame;
	case 0:
		trans.mFrame = inFrame;
		propertySize = sizeof(trans);
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyFrameToPacket, &propertySize, &trans),
			"packet <-> frame translation unimplemented for format with variable frames/packet");
		return trans.mPacket;
	}
	return inFrame / mFileDataFormat.mFramesPerPacket;
}
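The reverse mapping follows the same pattern via the companion property kAudioFilePropertyPacketToFrame; this counterpart is a sketch inferred from the function above, not code from the source:

// Hypothetical inverse of FrameToPacket.
SInt64	CAAudioFile::PacketToFrame(SInt64 inPacket) const
{
	AudioFramePacketTranslation trans;
	UInt32 propertySize;

	switch (mFileDataFormat.mFramesPerPacket) {
	case 1:
		return inPacket;
	case 0:
		trans.mPacket = inPacket;
		propertySize = sizeof(trans);
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketToFrame, &propertySize, &trans),
			"packet <-> frame translation unimplemented for format with variable frames/packet");
		return trans.mFrame;
	}
	return inPacket * mFileDataFormat.mFramesPerPacket;
}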
Example #21
// _______________________________________________________________________________________
//
OSStatus	CAAudioFile::SetConverterProperty(
											AudioConverterPropertyID	inPropertyID,
											UInt32						inPropertyDataSize,
											const void*					inPropertyData,
											bool						inCanFail)
{
	OSStatus err = noErr;
	//LOG_FUNCTION("ExtAudioFile::SetConverterProperty", "%p %-4.4s", this, (char *)&inPropertyID);
	if (inPropertyID == kAudioConverterPropertySettings && *(CFPropertyListRef *)inPropertyData == NULL)
		;
	else {
		err = AudioConverterSetProperty(mConverter, inPropertyID, inPropertyDataSize, inPropertyData);
		if (!inCanFail) {
			XThrowIfError(err, "set audio converter property");
		}
	}
	UpdateClientMaxPacketSize();
	if (mMode == kPreparingToWrite)
		FileFormatChanged();
	return err;
}
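Example #27 below applies encoder settings through this wrapper; a minimal sketch of such a call, assuming file is a CAAudioFile being prepared for writing:

// Hypothetical usage: set an AAC encode bitrate through the wrapper.
UInt32 bitRate = 192000;
file.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &bitRate);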
Example #22
// _______________________________________________________________________________________
//	Allocates: mIOBufferList, mIOBufferSizePackets, mPacketDescs
//	Dependent on: mFileMaxPacketSize, mIOBufferSizeBytes
void	CAAudioFile::AllocateBuffers(bool okToFail)
{
	LOG_FUNCTION("CAAudioFile::AllocateBuffers", "%p", this);
	if (mFileMaxPacketSize == 0) {
		if (okToFail)
			return;
		XThrowIf(true, kExtAudioFileError_MaxPacketSizeUnknown, "file's maximum packet size is 0");
	}
	UInt32 bufferSizeBytes = mIOBufferSizeBytes = std::max(mIOBufferSizeBytes, mFileMaxPacketSize);
		// must be big enough for at least one maximum size packet

	if (mIOBufferList.mBuffers[0].mDataByteSize != bufferSizeBytes) {
		mIOBufferList.mNumberBuffers = 1;
		mIOBufferList.mBuffers[0].mNumberChannels = mFileDataFormat.mChannelsPerFrame;
		if (!mClientOwnsIOBuffer) {
			//printf("reallocating I/O buffer\n");
			delete[] (Byte *)mIOBufferList.mBuffers[0].mData;
			mIOBufferList.mBuffers[0].mData = new Byte[bufferSizeBytes];
		}
		mIOBufferList.mBuffers[0].mDataByteSize = bufferSizeBytes;
		mIOBufferSizePackets = bufferSizeBytes / mFileMaxPacketSize;
	}

	UInt32 propertySize = sizeof(UInt32);
	UInt32 externallyFramed;
	XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatIsExternallyFramed,
			sizeof(AudioStreamBasicDescription), &mFileDataFormat, &propertySize, &externallyFramed),
			"is format externally framed");
	if (mNumPacketDescs != (externallyFramed ? mIOBufferSizePackets : 0)) {
		delete[] mPacketDescs;
		mPacketDescs = NULL;
		mNumPacketDescs = 0;

		if (externallyFramed) {
			//printf("reallocating packet descs\n");
			mPacketDescs = new AudioStreamPacketDescription[mIOBufferSizePackets];
			mNumPacketDescs = mIOBufferSizePackets;
		}
	}
}
Example #23
void	CAAudioFileRecorder::SetFile(const FSRef &parentFSRef, CFStringRef filename, AudioFileTypeID filetype, const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
	delete mAudioInputPtrs; mAudioInputPtrs = NULL;
	
	CAStreamBasicDescription fileDataFormat = dataFormat;
	if (fileDataFormat.mSampleRate == 0.)
		fileDataFormat.mSampleRate = mInputDataFormat.mSampleRate;
	
	CAAudioFileWriter::SetFile(parentFSRef, filename, filetype, fileDataFormat, layout);

	const CAStreamBasicDescription &fmt = GetFile().GetClientDataFormat();
	XThrowIfError(AudioUnitSetProperty(
							mInputUnit,
							kAudioUnitProperty_StreamFormat,
							kAudioUnitScope_Output,
							1,
							(void *)&fmt,
							sizeof(AudioStreamBasicDescription)), "set audio input format");

	GetFile().SetIOBufferSizeBytes(GetBufferSizeFrames() * fmt.mBytesPerFrame);

	mAudioInputPtrs = CABufferList::New("audio input ptrs", fmt);
}
Example #24
int SetupRemoteIO (AudioUnit& inRemoteIOUnit, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat)
{	
	try {		
		// Open the output unit
		AudioComponentDescription desc;
		desc.componentType = kAudioUnitType_Output;
		desc.componentSubType = kAudioUnitSubType_RemoteIO;
		desc.componentManufacturer = kAudioUnitManufacturer_Apple;
		desc.componentFlags = 0;
		desc.componentFlagsMask = 0;
		
		AudioComponent comp = AudioComponentFindNext(NULL, &desc);
		
		XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit");

		UInt32 one = 1;
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "couldn't enable input on the remote I/O unit");
	
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &inRenderProc, sizeof(inRenderProc)), "couldn't set remote i/o render callback");
		
        // NEWL: Set the canonical format not of the AudioUnits but of the input/output system:
        //       LPCM, non-interleaved, 16-bit signed integer data.
        // outFormat.SetCanonical(2, false);
        
        // OLDL: set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
        outFormat.SetAUCanonical(2, false);
		outFormat.mSampleRate = SAMPRATE;
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's output client format");
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's input client format");

		XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit");
	}
	catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return 1;
	}
	catch (...) {
		fprintf(stderr, "An unknown error occurred\n");
		return 1;
	}	
	
	return 0;
}
Example #25
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM, create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // placeholder for an interruption check; as written, nothing here blocks
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jibe with the packet table info we write next
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
Example #26
void	CAAudioFileConverter::GenerateOutputFileName(const char *inputFilePath, 
						const CAStreamBasicDescription &inputFormat,
						const CAStreamBasicDescription &outputFormat, OSType outputFileType, 
						char *outName)
{
	struct stat sb;
	char inputDir[256];
	char inputBasename[256];
	
	strcpy(inputDir, dirname(inputFilePath));
	const char *infname = basename(inputFilePath);
	const char *inext = strrchr(infname, '.');
	if (inext == NULL) strcpy(inputBasename, infname);
	else {
		int n;
		memcpy(inputBasename, infname, n = inext - infname);
		inputBasename[n] = '\0';
	}
	
	CFArrayRef exts;
	UInt32 propSize = sizeof(exts);
	XThrowIfError(AudioFileGetGlobalInfo(kAudioFileGlobalInfo_ExtensionsForType,
		sizeof(OSType), &outputFileType, &propSize, &exts), "generate output file name");
	char outputExt[32];
	CFStringRef cfext = (CFStringRef)CFArrayGetValueAtIndex(exts, 0);
	CFStringGetCString(cfext, outputExt, sizeof(outputExt), kCFStringEncodingUTF8);
	CFRelease(exts);
	
	// 1. olddir + oldname + newext
	sprintf(outName, "%s/%s.%s", inputDir, inputBasename, outputExt);
#if TARGET_OS_MAC	
	if (lstat(outName, &sb)) return;
#else
	if (stat(outName, &sb)) return;
#endif

	if (outputFormat.IsPCM()) {
		// If sample rate changed:
		//	2. olddir + oldname + "-SR" + newext
		if (inputFormat.mSampleRate != outputFormat.mSampleRate && outputFormat.mSampleRate != 0.) {
			sprintf(outName, "%s/%s-%.0fk.%s", inputDir, inputBasename, outputFormat.mSampleRate/1000., outputExt);
#if TARGET_OS_MAC	
			if (lstat(outName, &sb)) return;
#else
			if (stat(outName, &sb)) return;
#endif
		}
		// If bit depth changed:
		//	3. olddir + oldname + "-bit" + newext
		if (inputFormat.mBitsPerChannel != outputFormat.mBitsPerChannel) {
			sprintf(outName, "%s/%s-%ldbit.%s", inputDir, inputBasename, outputFormat.mBitsPerChannel, outputExt);
#if TARGET_OS_MAC	
			if (lstat(outName, &sb)) return;
#else
			if (stat(outName, &sb)) return;
#endif
		}
	}
	
	// maybe more with channels/layouts? $$$
	
	// now just append digits
	for (int i = 1; ; ++i) {
		sprintf(outName, "%s/%s-%d.%s", inputDir, inputBasename, i, outputExt);
#if TARGET_OS_MAC	
		if (lstat(outName, &sb)) return;
#else
		if (stat(outName, &sb)) return;
#endif
	}
}
Example #27
void	CAAudioFileConverter::ConvertFile(const ConversionParameters &_params)
{
	FSRef destFSRef;
	UInt32 propertySize;
	CAStreamBasicDescription destFormat;
	CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout;
	bool openedSourceFile = false, createdOutputFile = false;
	
	mParams = _params;
	mReadBuffer = NULL;
	mReadPtrs = NULL;
	CABufferList *writeBuffer = NULL;
	CABufferList *writePtrs = NULL;
	
	PrepareConversion();

	try {
		if (TaggedDecodingFromCAF())
			ReadCAFInfo();
		OpenInputFile();
		openedSourceFile = true;
		
		// get input file's format
		const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
		if (mParams.flags & kOpt_Verbose) {
			printf("Input file: %s, %qd frames\n", mParams.input.filePath ? basename(mParams.input.filePath) : "?", 
				mSrcFile.GetNumberFrames());
		}
		mSrcFormat = srcFormat;
		
		// prepare output file's format
		destFormat = mParams.output.dataFormat;

		bool encoding = !destFormat.IsPCM();
		bool decoding = !srcFormat.IsPCM();

		if (!encoding && destFormat.mSampleRate == 0.)
			// on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
			// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
			destFormat.mSampleRate = srcFormat.mSampleRate;
		
		// source channel layout
		srcFileLayout = mSrcFile.GetFileChannelLayout();
		origSrcFileLayout = srcFileLayout;
		if (mParams.input.channelLayoutTag != 0) {
			XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag)
				!= srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file");
			srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
			mSrcFile.SetFileChannelLayout(srcFileLayout);
		}
		
		// destination channel layout
		int outChannels = mParams.output.channels;
		if (mParams.output.channelLayoutTag != 0) {
			// use the one specified by caller, if any
			destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
		} else if (srcFileLayout.IsValid()) {
			// otherwise, assume the same as the source, if any
			destFileLayout = srcFileLayout;
		}
		if (destFileLayout.IsValid()) {
			// the output channel layout specifies the number of output channels
			if (outChannels != -1)
				XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(), -1,
					"output channel layout has wrong number of channels");
			else
				outChannels = destFileLayout.NumberChannels();
		}

		if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
			// adjust the output format's channels; output.channels overrides the channels
			if (outChannels == -1)
				outChannels = srcFormat.mChannelsPerFrame;
			if (outChannels > 0) {
				destFormat.mChannelsPerFrame = outChannels;
				destFormat.mBytesPerPacket *= outChannels;
				destFormat.mBytesPerFrame *= outChannels;
			}
		
			// use AudioFormat API to clean up the output format
			propertySize = sizeof(AudioStreamBasicDescription);
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat),
					"get destination format info");
		}
		OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout);
		createdOutputFile = true;
		mDestFormat = destFormat;
		
		// set up client formats
		CAStreamBasicDescription srcClientFormat, destClientFormat;
		{
			CAAudioChannelLayout srcClientLayout, destClientLayout;
			
			if (encoding) {
				if (decoding) {
					// transcoding
//					XThrowIf(encoding && decoding, -1, "transcoding not currently supported");
					
					if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2)
						CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0);
					srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true);
					srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate);
					mSrcFile.SetClientFormat(srcClientFormat, NULL);
					
					destClientFormat = srcClientFormat;
				} else {
					// encoding
					srcClientFormat = srcFormat;
					destClientFormat = srcFormat;
				}
				// by here, destClientFormat will have a valid sample rate
				destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout;

				mDestFile.SetClientFormat(destClientFormat, &destClientLayout);
			} else {
				// decoding or PCM->PCM
				if (destFormat.mSampleRate == 0.)
					destFormat.mSampleRate = srcFormat.mSampleRate;
		
				destClientFormat = destFormat;
				srcClientFormat = destFormat;
				srcClientLayout = destFileLayout;
				
				mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout);
			}
		}
		
		XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM"); 
		XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM"); 		
		if (encoding) {
			// set the bitrate
			if (mParams.output.bitRate != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("bitrate = %ld\n", mParams.output.bitRate);
				mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
			}

			// set the codec quality
			if (mParams.output.codecQuality != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("codec quality = %ld\n", mParams.output.codecQuality);
				mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
			}

			// set the bitrate strategy -- called bitrate format in the codecs since it had already shipped
			if (mParams.output.strategy != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("strategy = %ld\n", mParams.output.strategy);
				mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
			}
		}
		// set the SRC quality
		if (mParams.output.srcQuality != -1) {
			if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. && srcFormat.mSampleRate != destFormat.mSampleRate) {
				if (mParams.flags & kOpt_Verbose)
					printf("SRC quality = %ld\n", mParams.output.srcQuality);
				if (encoding)
					mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
				else
					mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
			}
		}
		if (decoding) {
			if (mParams.output.primeMethod != -1)
				mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
		}

		PrintFormats(&origSrcFileLayout);

		// prepare I/O buffers
		UInt32 bytesToRead = 0x10000;
		UInt32 framesToRead = bytesToRead;	// OK, ReadPackets will limit as appropriate
		ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead);

//		const SInt64 totalFrames = mSrcFile.GetNumberFrames();
//#warning "GetNumberFrames() can be prohibitively slow for some formats"
		
		mReadBuffer = CABufferList::New("readbuf", srcClientFormat);
		mReadBuffer->AllocateBuffers(bytesToRead);
		mReadPtrs = CABufferList::New("readptrs", srcClientFormat);
		
		BeginConversion();
		
		while (true) {
			//XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped");
				// this was commented out for a while -- performance? make it optional?
			UInt32 nFrames = framesToRead;
			mReadPtrs->SetFrom(mReadBuffer);
			AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList();
			
			mSrcFile.Read(nFrames, readbuf);
			//printf("read %ld of %ld frames\n", nFrames, framesToRead);
			if (nFrames == 0)
				break;

			mDestFile.Write(nFrames, readbuf);
			if (ShouldTerminateConversion())
				break;
		}
		
		if (decoding) {
			// fix up the destination file's length if necessary and possible
			SInt64 nframes = mSrcFile.GetNumberFrames();
			if (nframes != 0) {
				// only shorten, don't try to lengthen
				nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate));
				if (nframes < mDestFile.GetNumberFrames()) {
					mDestFile.SetNumberFrames(nframes);
				}
			}
		}
		EndConversion();
	}
	catch (...) {
		delete mReadBuffer;
		delete mReadPtrs;
		delete writeBuffer;
		delete writePtrs;
		if (!createdOutputFile)
			PrintFormats(&origSrcFileLayout);
		try { mSrcFile.Close(); } catch (...) { }
		try { mDestFile.Close(); } catch (...) { }
		if (createdOutputFile)
			unlink(mOutName);
		throw;
	}
	delete mReadBuffer;
	delete mReadPtrs;
	delete writeBuffer;
	delete writePtrs;
	mSrcFile.Close();
	mDestFile.Close();
	if (TaggedEncodingToCAF())
		WriteCAFInfo();
	
	if (mParams.flags & kOpt_Verbose) {
		// the file must be closed to flush the encoder; GetNumberFrames() isn't
		// necessarily valid until afterwards, so reopen the output file to query it
		CAAudioFile temp;
		FSRef destFSRef;
		if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
			temp.Open(destFSRef);
			printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames());
		}
	}
}
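
The converter paths above lean on CAStreamBasicDescription::SetCanonical to build their PCM client formats. For reference, here is a minimal sketch of what that canonical format amounts to on the desktop, where the canonical sample type is 32-bit native-endian float (on iOS it was SInt16). The helper name FillCanonicalPCM is ours for illustration, not part of PublicUtility.

#include <CoreAudio/CoreAudioTypes.h>
#include <cstring>

// Sketch only: fills an ASBD the way SetCanonical(nChannels, interleaved)
// does on macOS, where the canonical sample type is Float32.
static void FillCanonicalPCM(AudioStreamBasicDescription &asbd,
							 UInt32 nChannels, bool interleaved,
							 Float64 sampleRate)
{
	memset(&asbd, 0, sizeof(asbd));
	asbd.mFormatID         = kAudioFormatLinearPCM;
	asbd.mSampleRate       = sampleRate;
	asbd.mChannelsPerFrame = nChannels;
	asbd.mBitsPerChannel   = 32;
	asbd.mFramesPerPacket  = 1;
	asbd.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
	if (interleaved) {
		asbd.mBytesPerFrame = asbd.mBytesPerPacket = 4 * nChannels;
	} else {
		// deinterleaved: one buffer per channel, so each frame is 4 bytes per buffer
		asbd.mFormatFlags  |= kAudioFormatFlagIsNonInterleaved;
		asbd.mBytesPerFrame = asbd.mBytesPerPacket = 4;
	}
}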
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate) 
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample, conversion should never run on the main thread
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
	try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
		UInt32 size = sizeof(srcFormat);
		XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
		printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after the audio converter (owned here by the ExtAudioFile object)
        // has been constructed and its output format set -- there's no strong reason to prefer construction time,
        // interruption time, or resumption time, but we query at construction time since it means less code to
        // execute during or after an interruption
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        UInt32 bufferByteSize = 32768;
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            UInt32 numFrames = 0;
            if (clientFormat.mBytesPerFrame > 0) // guards a bogus analyzer divide-by-zero warning; mBytesPerFrame can't be 0 here
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // (bufferByteSize / clientFormat.mBytesPerFrame)

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
	}
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
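
Since DoConvertFile asserts that it is off the main thread, a hypothetical caller might hand it to a background GCD queue, as sketched below. The paths, the AAC output choice, and the ConvertRequest/ConvertWorker names are placeholders of ours, not part of the sample.

#include <CoreFoundation/CoreFoundation.h>
#include <AudioToolbox/AudioToolbox.h>
#include <dispatch/dispatch.h>
#include <cstdio>
#include <cstring>

// Hypothetical caller sketch: run the conversion on a background queue.
struct ConvertRequest { CFURLRef src; CFURLRef dst; };

static void ConvertWorker(void *ctx)
{
    ConvertRequest *req = static_cast<ConvertRequest *>(ctx);
    // 0 for the output sample rate means "match the source", per DoConvertFile above
    OSStatus err = DoConvertFile(req->src, req->dst, kAudioFormatMPEG4AAC, 0);
    if (err) fprintf(stderr, "conversion failed: %d\n", (int)err);
    CFRelease(req->src);
    CFRelease(req->dst);
    delete req;
}

static void ConvertInBackground(const char *srcPath, const char *dstPath)
{
    ConvertRequest *req = new ConvertRequest;
    req->src = CFURLCreateFromFileSystemRepresentation(
        kCFAllocatorDefault, (const UInt8 *)srcPath, strlen(srcPath), false);
    req->dst = CFURLCreateFromFileSystemRepresentation(
        kCFAllocatorDefault, (const UInt8 *)dstPath, strlen(dstPath), false);
    dispatch_async_f(dispatch_get_global_queue(QOS_CLASS_UTILITY, 0), req, ConvertWorker);
}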
Ejemplo n.º 29
0
int main (int argc, const char * argv[]) 
{
#if TARGET_OS_WIN32
	InitializeQTML(0L);
#endif
	const char *fpath = NULL;
	Float32 volume = 1;
	Float32 duration = -1;
	Float32 currentTime = 0.0;
	Float32 rate = 0;
	int rQuality = 0;
	
	bool doPrint = false;
	for (int i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		if (arg[0] != '-') {
			if (fpath != NULL) {
				fprintf(stderr, "may only specify one file to play\n");
				usage();
			}
			fpath = arg;
		} else {
			arg += 1;
			if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];
				sscanf(arg, "%f", &volume);
			} else if (arg[0] == 't' || !strcmp(arg, "-time")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &duration);
			} else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &rate);
			} else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%d", &rQuality);
			} else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
				usage();
			} else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
				doPrint = true;
			} else {
				fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
				usage();
			}
		}
	}

	if (fpath == NULL)
		usage();
	
	if (doPrint)
		printf ("Playing file: %s\n", fpath);
	
	try {
		AQTestInfo myInfo;
		
		CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
		XThrowIf (!sndFile, -1, "can't parse file path");
			
		OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
		CFRelease (sndFile);
						
		XThrowIfError(result, "AudioFileOpen failed");
		
		UInt32 size;
		XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
		UInt32 numFormats = size / sizeof(AudioFormatListItem);
		AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
		numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
		if (numFormats == 1) {
				// this is the common case
			myInfo.mDataFormat = formatList[0].mASBD;
			
				// see if there is a channel layout (multichannel file)
			result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
			if (result == noErr && myInfo.mChannelLayoutSize > 0) {
				myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
				XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
			}
		} else {
			if (doPrint) {
				printf ("File has a %d layered data format:\n", (int)numFormats);
				for (unsigned int i = 0; i < numFormats; ++i)
					CAStreamBasicDescription(formatList[i].mASBD).Print();
			}
			// now we should look to see which decoders we have on the system
			XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder IDs");
			UInt32 numDecoders = size / sizeof(OSType);
			OSType *decoderIDs = new OSType [ numDecoders ];
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder IDs");
			unsigned int i = 0;
			for (; i < numFormats; ++i) {
				OSType decoderID = formatList[i].mASBD.mFormatID;
				bool found = false;
				for (unsigned int j = 0; j < numDecoders; ++j) {
					if (decoderID == decoderIDs[j]) {
						found = true;
						break;
					}
				}
				if (found) break;
			}
			delete [] decoderIDs;
			
			if (i >= numFormats) {
				fprintf (stderr, "Cannot play any of the formats in this file\n");
				throw kAudioFileUnsupportedDataFormatError;
			}
			myInfo.mDataFormat = formatList[i].mASBD;
			myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
			myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
			myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
			myInfo.mChannelLayout->mChannelBitmap = 0;
			myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
		}
		delete [] formatList;
		
		if (doPrint) {
			printf ("Playing format: "); 
			myInfo.mDataFormat.Print();
		}
		
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, 
									CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNewOutput failed");

		UInt32 bufferByteSize;		
		// we need to calculate how many packets we read at a time, and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check the file's maximum packet size - if it is bigger
			// than our default allocation size, the buffer must grow to match
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a half second of audio based on this format
			CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR)
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			else
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
				
			if (doPrint)
				printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// if the file has a cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// set ACL if there is one
		if (myInfo.mChannelLayout)
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

		// prime the queue with some data before starting
		myInfo.mDone = false;
		myInfo.mCurrentPacket = 0;
		for (int i = 0; i < kNumberBuffers; ++i) {
			XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

			AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);
			
			if (myInfo.mDone) break;
		}	
			// set the volume of the queue
		XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");
		
		XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");
		
#if !TARGET_OS_IPHONE
		if (rate > 0) {
			UInt32 propValue = 1;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");
			
			propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");
			
			propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
			if (rate != 1) {
				XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
			}
			
			if (doPrint) {
				printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
			}
		}
#endif
			// let's start playing now - stop is called in AQTestBufferCallback when there's
			// nothing more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		do {
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
			currentTime += .25;
			if (duration > 0 && currentTime >= duration)
				break;
			
		} while (gIsRunning);
			
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioFileClose failed");
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
	catch (...) {
		fprintf(stderr, "Unspecified exception\n");
	}
	
    return 0;
}
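
CalculateBytesForTime, used above to size the queue buffers, is not reproduced in this snippet. A plausible implementation in the spirit of Apple's AudioQueue sample helpers is sketched below; the 16K/64K clamp bounds are assumptions of ours, not values taken from this file.

// Sketch of a buffer-sizing helper: aim for roughly inSeconds of audio,
// clamped to sane bounds, and derive the packet count from the max packet size.
static void CalculateBytesForTime(const AudioStreamBasicDescription &inDesc,
								  UInt32 inMaxPacketSize, Float64 inSeconds,
								  UInt32 *outBufferSize, UInt32 *outNumPackets)
{
	static const UInt32 maxBufferSize = 0x10000;	// cap at 64K
	static const UInt32 minBufferSize = 0x4000;		// floor at 16K

	if (inDesc.mFramesPerPacket) {
		// packets per second = sampleRate / framesPerPacket
		Float64 numPacketsForTime = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
		*outBufferSize = (UInt32)(numPacketsForTime * inMaxPacketSize);
	} else {
		// no packet duration known - just take the larger of the two bounds
		*outBufferSize = maxBufferSize > inMaxPacketSize ? maxBufferSize : inMaxPacketSize;
	}

	if (*outBufferSize > maxBufferSize && *outBufferSize > inMaxPacketSize)
		*outBufferSize = maxBufferSize;
	else if (*outBufferSize < minBufferSize)
		*outBufferSize = minBufferSize;

	*outNumPackets = *outBufferSize / inMaxPacketSize;
}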
Ejemplo n.º 30
0
void	CAChannelMappingPlayer::SetupChannelMapping()
{
	delete mMapper;
	mMapper = NULL;
	
	const CAStreamBasicDescription &fileFormat = GetFile().GetClientDataFormat();
	CAStreamBasicDescription deviceFormat;
	UInt32 propertySize = sizeof(AudioStreamBasicDescription);
	
	XThrowIfError(AudioUnitGetProperty(
							GetOutputUnit(),
							kAudioUnitProperty_StreamFormat,
							kAudioUnitScope_Output,
							0,
							(void *)&deviceFormat,
							&propertySize), "get output device's format");

#if VERBOSE
	printf("CAChannelMappingPlayer::SetupChannelMapping: %ld-ch file, %ld-ch device\n",
		fileFormat.mChannelsPerFrame, deviceFormat.mChannelsPerFrame);
#endif

	if (fileFormat.mChannelsPerFrame <= deviceFormat.mChannelsPerFrame) {
		// no mapping needed, use output unit's default behavior 
		// (default stereo pair and speaker config from AMS)
#if VERBOSE
		printf("  using output unit's channel mapping\n");
#endif
		CAAudioFilePlayer::SetupChannelMapping();
	} else {
		// the device has fewer channels than the file, so mapping is needed
		CAAudioChannelLayout fileLayout, deviceLayout;
		
#if VERBOSE
		printf("  using our own channel mapping\n");
#endif
		deviceFormat.mSampleRate = fileFormat.mSampleRate;
		deviceFormat.SetCanonical(deviceFormat.mChannelsPerFrame, false);	// force deinterleaved
		
		fileLayout = GetFile().GetFileChannelLayout();

		UInt32 layoutSize;
		Boolean writable;
		OSStatus err = AudioUnitGetPropertyInfo(
								GetOutputUnit(),
								kAudioUnitProperty_AudioChannelLayout,
								kAudioUnitScope_Input,
								0,
								&layoutSize,
								&writable);
		if (!err) {
			char *buf = (char *)malloc(layoutSize);
			err = AudioUnitGetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_AudioChannelLayout,
								kAudioUnitScope_Input,
								0,
								buf,
								&layoutSize);
			deviceLayout = CAAudioChannelLayout(reinterpret_cast<AudioChannelLayout *>(buf));
			free(buf);
		}
		mMapper = new CAChannelMapper(fileFormat, deviceFormat, &fileLayout, &deviceLayout);

		// give the output unit the same number of channels as in the device, 
		// since we'll be doing the mapping ourselves
		XThrowIfError(AudioUnitSetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_StreamFormat,
								kAudioUnitScope_Input,
								0,
								(void *)&deviceFormat,
								sizeof(AudioStreamBasicDescription)), "set audio output format");
		
		XThrowIfError(mMapper->OpenMixer(fileFormat.mSampleRate), "open mixer");
		XThrowIfError(mMapper->ConfigureDownmix(), "configure downmix");
		
		AudioUnitConnection conn;
		conn.sourceAudioUnit = mMapper->GetMixer();
		conn.sourceOutputNumber = 0;
		conn.destInputNumber = 0;

		XThrowIfError(AudioUnitSetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_MakeConnection,
								kAudioUnitScope_Global,
								0,
								(void *)&conn,
								sizeof(AudioUnitConnection)), "connect mixer to output unit");
		
		AURenderCallbackStruct input;
		input.inputProc = InputProc;
		input.inputProcRefCon = this;
		XThrowIfError(AudioUnitSetProperty(
									conn.sourceAudioUnit, 
									kAudioUnitProperty_SetRenderCallback, 
									kAudioUnitScope_Global,
									0,
									&input, 
									sizeof(input)), "connect input proc to mixer");
		// provide NO channel layout
//		mReadBuf = CABufferList::New("", fileFormat);
//		mReadBuf->AllocateBuffers(
	}
}
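
The render callback registered above, InputProc, falls outside this snippet. A minimal sketch of what it plausibly does -- pull frames from the file into the mixer's input -- follows. It assumes InputProc is a static member with the standard AURenderCallback signature and that GetFile() and GetClientDataFormat() behave as the calls in SetupChannelMapping suggest; this is a reconstruction of ours, not the class's actual implementation.

// Sketch only: feed file data to the mapper's mixer input. Assumes the
// implementation file's usual includes (CAAudioFile.h, <cstring> for memset).
OSStatus CAChannelMappingPlayer::InputProc(void *						inRefCon,
										   AudioUnitRenderActionFlags *	ioActionFlags,
										   const AudioTimeStamp *		inTimeStamp,
										   UInt32						inBusNumber,
										   UInt32						inNumberFrames,
										   AudioBufferList *			ioData)
{
	CAChannelMappingPlayer *This = static_cast<CAChannelMappingPlayer *>(inRefCon);
	UInt32 nFrames = inNumberFrames;
	This->GetFile().Read(nFrames, ioData);	// read in the file's client data format
	if (nFrames < inNumberFrames) {
		// end of file: zero-fill the remainder of each buffer
		UInt32 bytesPerFrame = This->GetFile().GetClientDataFormat().mBytesPerFrame;
		for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
			memset((char *)ioData->mBuffers[i].mData + nFrames * bytesPerFrame,
				   0, (inNumberFrames - nFrames) * bytesPerFrame);
	}
	return noErr;
}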