AudioFile::AudioFile(CFURLRef fileURL)
  : mAudioFileID(0), mAudioConverterRef(0), mTotalFrames(0), mFileSize(0),
    mIsVBR(false), mNumPacketsToRead(0), mPacketDescs(0), mConverterBuffer(0),
    mCursor(0)
{
  checkError(AudioFileOpenURL(fileURL, kAudioFileReadPermission, NULL, &mAudioFileID), "AudioFileOpenURL");
  UInt32 size = sizeof(AudioStreamBasicDescription);
  checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, &size, &mInputFormat), "AudioFileGetProperty");
  
  size = sizeof(UInt64);
  checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, &size, &mFileSize), "AudioFileGetProperty");
  if (mInputFormat.mBytesPerFrame == 0) {
    mIsVBR = true;
  }
  
  UInt64 totalPackets;
  size = sizeof(UInt64);
  checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataPacketCount, &size, &totalPackets), "AudioFileGetProperty");
  
  if (!mIsVBR) {
    mTotalFrames = totalPackets;
  } else {
    AudioFramePacketTranslation translation;
    translation.mPacket = totalPackets;
    size = sizeof(AudioFramePacketTranslation);
    checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketToFrame, &size, &translation), "AudioFileGetProperty");
    mTotalFrames = translation.mFrame;
  }
  
  mCursor = (UInt64*)calloc(1, sizeof(UInt64));
  *mCursor = 0;
  std::cout << "Total Packets : " << mTotalFrames << std::endl;
}
ScheduledAudioFileRegion RegionForEntireFile(AudioFileID fileID) {
	ScheduledAudioFileRegion region = {0};
	UInt64 numPackets = 0;
	UInt32 dataSize = sizeof(numPackets);
	
	AudioFileGetProperty(fileID,
	                     kAudioFilePropertyAudioDataPacketCount,
	                     &dataSize,
	                     &numPackets);
	
	AudioStreamBasicDescription asbd = {0};
	dataSize = sizeof(asbd);
	
	AudioFileGetProperty(fileID, kAudioFilePropertyDataFormat, &dataSize, &asbd);
	
	// defining a region which basically says "play the whole file"
	region.mTimeStamp.mFlags       = kAudioTimeStampSampleTimeValid;
	region.mTimeStamp.mSampleTime  = 0;
	region.mCompletionProc         = NULL;
	region.mCompletionProcUserData = NULL;
	region.mAudioFile              = fileID;
	region.mLoopCount              = 0;
	region.mStartFrame             = 0;
	region.mFramesToPlay           = numPackets * asbd.mFramesPerPacket;
	
	return region;
}
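The region returned above only does something once it is handed to an AUAudioFilePlayer instance. Here is a minimal sketch of that hand-off, assuming an already-initialized file player unit (the function name and filePlayerUnit parameter are illustrative, not part of the example above; the property IDs are the standard scheduled-file-player properties):

OSStatus ScheduleRegionOnFilePlayer(AudioUnit filePlayerUnit, AudioFileID fileID)
{
	ScheduledAudioFileRegion region = RegionForEntireFile(fileID);

	// tell the file player which file it may read from
	OSStatus err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_ScheduledFileIDs,
	                                    kAudioUnitScope_Global, 0, &fileID, sizeof(fileID));
	if (err != noErr) return err;

	// schedule the region itself
	err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_ScheduledFileRegion,
	                           kAudioUnitScope_Global, 0, &region, sizeof(region));
	if (err != noErr) return err;

	// prime the unit (0 = default number of frames to preload)
	UInt32 defaultPrime = 0;
	err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_ScheduledFilePrime,
	                           kAudioUnitScope_Global, 0, &defaultPrime, sizeof(defaultPrime));
	if (err != noErr) return err;

	// start on the next render cycle (-1 sample time means "as soon as possible")
	AudioTimeStamp startTime = {0};
	startTime.mFlags = kAudioTimeStampSampleTimeValid;
	startTime.mSampleTime = -1;
	return AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_ScheduleStartTimeStamp,
	                            kAudioUnitScope_Global, 0, &startTime, sizeof(startTime));
}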
Example #3
 music_obj<audio_queue_driver>::music_obj(const boost::shared_ptr<ifdstream>& ifd, bool loop, float gain,
                                          float start, float end)
 : packet_index_(0)
 , start_packet_index_(0)
 , stop_packet_index_(0)
 , volume_(gain)
 , loop_(loop)
 , is_paused_(false)
 , ifd_(ifd)
 {        
     LOG("Got ifdstream from path..");
     
     OSStatus res = AudioFileOpenWithCallbacks(this, &music_obj::af_read_cb, &music_obj::af_write_cb,
                         &music_obj::af_get_size_cb, &music_obj::af_set_size_cb, 
                             kAudioFileCAFType, &audio_file_);
     
     if(res)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file in liverpool fs. AudioFile returned " 
                                 + boost::lexical_cast<std::string>(res));
     }
     
     UInt32 size = sizeof(data_format_);
     AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);
     
     AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);        
     AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);
     
     if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
     {
         size = sizeof(max_packet_size_);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
         if (max_packet_size_ > BUFFER_SIZE_BYTES)
         {
             max_packet_size_ = BUFFER_SIZE_BYTES;
         }
         
         num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
         packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
     }
     else
     {
         num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
         packet_descriptions_ = NULL;
     }
     
     UInt32 cookie_size = 0;
     OSStatus cookie_res = AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &cookie_size, NULL);
     if (cookie_res == noErr && cookie_size > 0)
     {
         char* cookie = (char*)malloc(cookie_size);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &cookie_size, cookie);
         AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, cookie_size);
         free(cookie);
     }
     
     calculate_seek(start, end);        
     volume(volume_);
     prime();   
 }
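For context, the num_packets_to_read_ and packet_descriptions_ values computed above are what the queue's output callback consumes on each buffer. The project's actual buffer_cb isn't shown in this listing; the following is a hedged, stand-alone sketch of what such a callback typically does (the playback_state struct and example_buffer_cb name are invented for illustration):

// Minimal state an AudioQueue output callback needs; the field names mirror the
// members used in the constructor above, but this struct is hypothetical.
struct playback_state
{
    AudioFileID                   audio_file;
    SInt64                        packet_index;
    UInt32                        num_packets_to_read;
    AudioStreamPacketDescription* packet_descriptions; // NULL for CBR formats
};

static void example_buffer_cb(void* userdata, AudioQueueRef queue, AudioQueueBufferRef buffer)
{
    playback_state* st = static_cast<playback_state*>(userdata);

    // read the next run of packets; num_packets_to_read was sized so they fit the buffer
    UInt32 bytes_read   = 0;
    UInt32 packets_read = st->num_packets_to_read;
    OSStatus res = AudioFileReadPackets(st->audio_file, false, &bytes_read,
                                        st->packet_descriptions, st->packet_index,
                                        &packets_read, buffer->mAudioData);
    if (res == noErr && packets_read > 0)
    {
        buffer->mAudioDataByteSize = bytes_read;
        AudioQueueEnqueueBuffer(queue, buffer,
                                st->packet_descriptions ? packets_read : 0,
                                st->packet_descriptions);
        st->packet_index += packets_read;
    }
    else
    {
        // end of data: let the queue drain (a looping player would instead
        // reset packet_index and keep enqueueing)
        AudioQueueStop(queue, false);
    }
}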
// ----------------------------------------------------------
bool ofxAudioUnitFilePlayer::setFile(std::string filePath)
// ----------------------------------------------------------
{
	CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault,
	                                                           (const UInt8 *)filePath.c_str(),
	                                                           filePath.length(),
	                                                           NULL);
	
	if(fileID[0]) AudioFileClose(fileID[0]);
	
	OSStatus s = AudioFileOpenURL(fileURL, kAudioFileReadPermission, 0, fileID);
	
	CFRelease(fileURL);
	
	if(s != noErr)
	{
		if(s == fnfErr)
		{
			cout << "File not found : " << filePath << endl;
		}
		else 
		{
			cout << "Error " << s << " while opening file at " << filePath << endl;
		}
		return false;
	}
	
	UInt64 numPackets = 0;
	UInt32 dataSize = sizeof(numPackets);
	
	AudioFileGetProperty(fileID[0], kAudioFilePropertyAudioDataPacketCount, &dataSize, &numPackets);
	
	AudioStreamBasicDescription asbd = {0};
	dataSize = sizeof(asbd);
	
	AudioFileGetProperty(fileID[0], kAudioFilePropertyDataFormat, &dataSize, &asbd);
	
	// defining a region which basically says "play the whole file"
	memset(&region, 0, sizeof(region));
	region.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
	region.mTimeStamp.mSampleTime = 0;
	region.mCompletionProc = NULL;
	region.mCompletionProcUserData = NULL;
	region.mAudioFile = fileID[0];
	region.mLoopCount = 0;
	region.mStartFrame = 0;
	region.mFramesToPlay = numPackets * asbd.mFramesPerPacket;
	
	// setting the file ID now since it seems to have some overhead.
	// Doing it now ensures you'll get sound pretty much instantly after
	// calling play()
	return ERR_CHK_BOOL(AudioUnitSetProperty(*_unit,
	                                         kAudioUnitProperty_ScheduledFileIDs,
	                                         kAudioUnitScope_Global,
	                                         0,
	                                         fileID,
	                                         sizeof(fileID)),
	                    "setting file player's file ID");
}
Example #5
 music_obj<audio_queue_driver>::music_obj(const std::string& file_path, bool loop, float gain,
                                          float start, float end)
 : packet_index_(0)
 , start_packet_index_(0)
 , stop_packet_index_(0)
 , volume_(gain)
 , loop_(loop)
 , is_paused_(false)
 {
     CFURLRef file_url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)file_path.c_str(), file_path.size(), false);
     OSStatus res = AudioFileOpenURL(file_url, kAudioFileReadPermission, kAudioFileCAFType, &audio_file_);
     CFRelease(file_url);
     
     if(res)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file at '" + file_path + "'");
     }
     
     UInt32 size = sizeof(data_format_);
     AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);
     
     AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);        
     AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);
     
     if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
     {
         size = sizeof(max_packet_size_);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
         if (max_packet_size_ > BUFFER_SIZE_BYTES)
         {
             max_packet_size_ = BUFFER_SIZE_BYTES;
         }
         
         num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
         packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
     }
     else
     {
         num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
         packet_descriptions_ = NULL;
     }
     
     UInt32 cookie_size = 0;
     OSStatus cookie_res = AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &cookie_size, NULL);
     if (cookie_res == noErr && cookie_size > 0)
     {
         char* cookie = (char*)malloc(cookie_size);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &cookie_size, cookie);
         AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, cookie_size);
         free(cookie);
     }
     
     calculate_seek(start, end);
     volume(volume_);
     prime();
 }
Example #6
		OSStatus SetupBuffers(BG_FileInfo *inFileInfo)
		{
			int numBuffersToQueue = kNumberBuffers;
			UInt32 maxPacketSize;
			UInt32 size = sizeof(maxPacketSize);
			// we need to calculate how many packets we read at a time, and how big a buffer we need
			// we base this on the size of the packets in the file and an approximate duration for each buffer
				
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			OSStatus result = AudioFileGetProperty(inFileInfo->mAFID, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize);
				AssertNoError("Error getting packet upper bound size", end);
			bool isFormatVBR = (inFileInfo->mFileFormat.mBytesPerPacket == 0 || inFileInfo->mFileFormat.mFramesPerPacket == 0);

			CalculateBytesForTime(inFileInfo->mFileFormat, maxPacketSize, 0.5/*seconds*/, &mBufferByteSize, &mNumPacketsToRead);
			
			// if the file is smaller than the capacity of all the buffer queues, always load it at once
			if ((mBufferByteSize * numBuffersToQueue) > inFileInfo->mFileDataSize)
				inFileInfo->mLoadAtOnce = true;
				
			if (inFileInfo->mLoadAtOnce)
			{
				UInt64 theFileNumPackets;
				size = sizeof(UInt64);
				result = AudioFileGetProperty(inFileInfo->mAFID, kAudioFilePropertyAudioDataPacketCount, &size, &theFileNumPackets);
					AssertNoError("Error getting packet count for file", end);
				
				mNumPacketsToRead = (UInt32)theFileNumPackets;
				mBufferByteSize = inFileInfo->mFileDataSize;
				numBuffersToQueue = 1;
			}	
			else
			{
				mNumPacketsToRead = mBufferByteSize / maxPacketSize;
			}
			
			if (isFormatVBR)
				mPacketDescs = new AudioStreamPacketDescription [mNumPacketsToRead];
			else
				mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)	
				
			// allocate the queue's buffers
			for (int i = 0; i < numBuffersToQueue; ++i) 
			{
				result = AudioQueueAllocateBuffer(mQueue, mBufferByteSize, &mBuffers[i]);
					AssertNoError("Error allocating buffer for queue", end);
				QueueCallback (this, mQueue, mBuffers[i]);
				if (inFileInfo->mLoadAtOnce)
					inFileInfo->mFileDataInQueue = true;
			}
		
		//end:
			return result;
		}
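CalculateBytesForTime() isn't part of the listing above. A sketch along the lines of Apple's queue-playback samples is below; treat the exact clamp values as assumptions:

void CalculateBytesForTime(const AudioStreamBasicDescription &inDesc, UInt32 inMaxPacketSize,
                           Float64 inSeconds, UInt32 *outBufferSize, UInt32 *outNumPackets)
{
	// reasonable upper and lower bounds for a single queue buffer (assumed values)
	static const UInt32 maxBufferSize = 0x10000;   // 64 KB
	static const UInt32 minBufferSize = 0x4000;    // 16 KB

	if (inDesc.mFramesPerPacket) {
		// packets map to a predictable duration, so size the buffer for inSeconds of audio
		Float64 numPacketsForTime = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
		*outBufferSize = (UInt32)(numPacketsForTime * inMaxPacketSize);
	} else {
		// no predictable packet duration: fall back to the larger of the two defaults
		*outBufferSize = maxBufferSize > inMaxPacketSize ? maxBufferSize : inMaxPacketSize;
	}

	// clamp to the bounds, but never below a single packet
	if (*outBufferSize > maxBufferSize && *outBufferSize > inMaxPacketSize)
		*outBufferSize = maxBufferSize;
	else if (*outBufferSize < minBufferSize)
		*outBufferSize = minBufferSize;

	*outNumPackets = *outBufferSize / inMaxPacketSize;
}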
int	main(int argc, const char *argv[])
{
 	MyAudioConverterSettings audioConverterSettings = {0};
	
	// open the input audio file
	CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, kInputFileLocation, kCFURLPOSIXPathStyle, false);
    CheckResult (AudioFileOpenURL(inputFileURL, kAudioFileReadPermission , 0, &audioConverterSettings.inputFile),
				 "AudioFileOpenURL failed");
	CFRelease(inputFileURL);
	
	// get the audio data format from the file
	UInt32 propSize = sizeof(audioConverterSettings.inputFormat);
    CheckResult (AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyDataFormat, &propSize, &audioConverterSettings.inputFormat),
				 "couldn't get file's data format");
	
	// get the total number of packets in the file
	propSize = sizeof(audioConverterSettings.inputFilePacketCount);
    CheckResult (AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyAudioDataPacketCount, &propSize, &audioConverterSettings.inputFilePacketCount),
				 "couldn't get file's packet count");
	
	// get size of the largest possible packet
	propSize = sizeof(audioConverterSettings.inputFilePacketMaxSize);
    CheckResult(AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyMaximumPacketSize, &propSize, &audioConverterSettings.inputFilePacketMaxSize),
				"couldn't get file's max packet size");
	
	// define the output format. AudioConverter requires that one of the data formats be LPCM
    audioConverterSettings.outputFormat.mSampleRate = 44100.0;
	audioConverterSettings.outputFormat.mFormatID = kAudioFormatLinearPCM;
    audioConverterSettings.outputFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioConverterSettings.outputFormat.mBytesPerPacket = 4;
	audioConverterSettings.outputFormat.mFramesPerPacket = 1;
	audioConverterSettings.outputFormat.mBytesPerFrame = 4;
	audioConverterSettings.outputFormat.mChannelsPerFrame = 2;
	audioConverterSettings.outputFormat.mBitsPerChannel = 16;
	
	// create output file
	// KEVIN: TODO: this fails if file exists. isn't there an overwrite flag we can use?
	CFURLRef outputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("output.aif"), kCFURLPOSIXPathStyle, false);
	CheckResult (AudioFileCreateWithURL(outputFileURL, kAudioFileAIFFType, &audioConverterSettings.outputFormat, kAudioFileFlags_EraseFile, &audioConverterSettings.outputFile),
				 "AudioFileCreateWithURL failed");
    CFRelease(outputFileURL);
	
	fprintf(stdout, "Converting...\n");
	Convert(&audioConverterSettings);
	
cleanup:
	AudioFileClose(audioConverterSettings.inputFile);
	AudioFileClose(audioConverterSettings.outputFile);
	printf("Done\r");
	return 0;
}
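CheckResult() (and the CheckError()/checkError() variants used elsewhere in these examples) isn't shown in the listings. They all follow the same pattern; here is a hedged sketch in the style of the Learning Core Audio book — print the OSStatus as a four-char code when it is printable, otherwise as an integer, and bail out:

// requires <stdio.h>, <stdlib.h>, <ctype.h> and CoreFoundation (for CFSwapInt32HostToBig)
static void CheckResult(OSStatus error, const char *operation)
{
	if (error == noErr) return;

	char errorString[20];
	// many Core Audio errors are four-char codes; show them that way if printable
	*(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig((UInt32)error);
	if (isprint(errorString[1]) && isprint(errorString[2]) &&
	    isprint(errorString[3]) && isprint(errorString[4])) {
		errorString[0] = errorString[5] = '\'';
		errorString[6] = '\0';
	} else {
		// otherwise fall back to the raw integer value
		sprintf(errorString, "%d", (int)error);
	}
	fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
	exit(1);
}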
Example #8
// _______________________________________________________________________________________
//
// call for existing file, NOT new one - from Open() or Wrap()
void	CAAudioFile::GetExistingFileInfo()
{
	LOG_FUNCTION("CAAudioFile::GetExistingFileInfo", "%p", this);
	UInt32 propertySize;
	OSStatus err;

	// get mFileDataFormat
	propertySize = sizeof(AudioStreamBasicDescription);
	XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyDataFormat, &propertySize, &mFileDataFormat), "get audio file's data format");

	// get mFileChannelLayout
	err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, &propertySize, NULL);
	if (err == noErr && propertySize > 0) {
		AudioChannelLayout *layout = static_cast<AudioChannelLayout *>(malloc(propertySize));
		err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyChannelLayout, &propertySize, layout);
		if (err == noErr) {
			mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
			printf("existing file's channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
		}
		free(layout);
		XThrowIfError(err, "get audio file's channel layout");
	}
	if (mMode != kReading)
		return;

#if 0
	// get mNumberPackets
	propertySize = sizeof(mNumberPackets);
	XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyAudioDataPacketCount, &propertySize, &mNumberPackets), "get audio file's packet count");
#if VERBOSE_IO
	printf("CAAudioFile::GetExistingFileInfo: %qd packets\n", mNumberPackets);
#endif
#endif

	// get mMagicCookie
	err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
	if (err == noErr && propertySize > 0) {
		mMagicCookie = new Byte[propertySize];
		mMagicCookieSize = propertySize;
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyMagicCookieData, &propertySize, mMagicCookie), "get audio file's magic cookie");
	}
	InitFileMaxPacketSize();
	mPacketMark = 0;
	mFrameMark = 0;

	UpdateClientMaxPacketSize();
}
Example #9
// _______________________________________________________________________________________
//
void	CAAudioFile::FlushEncoder()
{
	if (mConverter != NULL) {
		mFinishingEncoding = true;
		WritePacketsFromCallback(WriteInputProc, this);
		mFinishingEncoding = false;

		// get priming info from converter, set it on the file
		if (mFileDataFormat.mBitsPerChannel == 0) {
			UInt32 propertySize;
			OSStatus err;
			AudioConverterPrimeInfo primeInfo;
			propertySize = sizeof(primeInfo);

			err = AudioConverterGetProperty(mConverter, kAudioConverterPrimeInfo, &propertySize, &primeInfo);
			if (err == noErr) {
				AudioFilePacketTableInfo pti;
				propertySize = sizeof(pti);
				err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
				if (err == noErr) {
//printf("old packet table info: %qd valid, %ld priming, %ld remainder\n", pti.mNumberValidFrames, pti.mPrimingFrames, pti.mRemainderFrames);
					UInt64 totalFrames = pti.mNumberValidFrames + pti.mPrimingFrames + pti.mRemainderFrames;
					pti.mPrimingFrames = primeInfo.leadingFrames;
					pti.mRemainderFrames = primeInfo.trailingFrames;
					pti.mNumberValidFrames = totalFrames - pti.mPrimingFrames - pti.mRemainderFrames;
//printf("new packet table info: %qd valid, %ld priming, %ld remainder\n", pti.mNumberValidFrames, pti.mPrimingFrames, pti.mRemainderFrames);
					XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, sizeof(pti), &pti), "couldn't set packet table info on audio file");
				}
			}
		}
	}
}
Example #10
OSStatus LoadFileDataInfo(const char *inFilePath, AudioFileID &outAFID, AudioStreamBasicDescription &outFormat, UInt64 &outDataSize)
{
	UInt32 thePropSize = sizeof(outFormat);				
	OSStatus result = OpenFile(inFilePath, outAFID);
		AssertNoError("Error opening file", end);

	result = AudioFileGetProperty(outAFID, kAudioFilePropertyDataFormat, &thePropSize, &outFormat);
		AssertNoError("Error getting file format", end);
	
	thePropSize = sizeof(UInt64);
	result = AudioFileGetProperty(outAFID, kAudioFilePropertyAudioDataByteCount, &thePropSize, &outDataSize);
		AssertNoError("Error getting file data size", end);

end:
	return result;
}
Example #11
void	CAAudioFileConverter::WriteCAFInfo()
{
	FSRef fsref;
	AudioFileID afid = 0;
	CAFSourceInfo info;
	UInt32 size;
	
	try {
		XThrowIfError(FSPathMakeRef((UInt8 *)mParams.input.filePath, &fsref, NULL), "couldn't locate input file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdPerm, 0, &afid), "couldn't open input file");
		size = sizeof(AudioFileTypeID);
		XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
		AudioFileClose(afid);
		afid = 0;
		
		XThrowIfError(FSPathMakeRef((UInt8 *)mOutName, &fsref, NULL), "couldn't locate output file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdWrPerm, 0, &afid), "couldn't open output file");
		const char *srcFilename = strrchr(mParams.input.filePath, '/');
		if (srcFilename++ == NULL) srcFilename = mParams.input.filePath;
		ASBD_NtoB(&mSrcFormat, (AudioStreamBasicDescription *)info.asbd);
		int namelen = std::min(kMaxFilename-1, (int)strlen(srcFilename));
		memcpy(info.filename, srcFilename, namelen);
		info.filename[namelen++] = 0;
		info.filetype = EndianU32_NtoB(info.filetype);
		
		XThrowIfError(AudioFileSetUserData(afid, 'srcI', 0, offsetof(CAFSourceInfo, filename) + namelen, &info), "couldn't set CAF file's source info chunk");
		AudioFileClose(afid);
	}
	catch (...) {
		if (afid)
			AudioFileClose(afid);
		throw;
	}
}
Example #12
 double duration() const {
     double dur = 0;
     UInt32 sz = sizeof(dur);
     OSStatus status = AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyEstimatedDuration, &sz, &dur);
     checkStatus(status);
     return dur;
 }
Example #13
// _______________________________________________________________________________________
//
SInt64  CAAudioFile::FileDataOffset()
{
	if (mFileDataOffset < 0) {
		UInt32 propertySize = sizeof(SInt64);
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyDataOffset, &propertySize, &mFileDataOffset), "couldn't get file's data offset");
	}
	return mFileDataOffset;
}
Example #14
    void seek(double sec) {
        double frame = sec * aqData.mDataFormat.mSampleRate;
        
        AudioFramePacketTranslation trans;
        trans.mFrame = frame;

        UInt32 sz = sizeof(trans);
        OSStatus status = AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyFrameToPacket, &sz, &trans);

        seekToPacket(trans.mPacket);
        trans.mFrameOffsetInPacket = 0; // Don't support sub packet seeking..
        
        status = AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyPacketToFrame, &sz, &trans);

        timeBase = trans.mFrame / aqData.mDataFormat.mSampleRate;

    }
void AudioFile::read(Float32 *data, UInt64 *cursor, UInt32 *numFrames)
{
  AudioFramePacketTranslation t;
  UInt32 size = sizeof(AudioFramePacketTranslation);
  t.mFrame = *cursor;
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t);
  *mCursor = t.mPacket;
  
  AudioFramePacketTranslation t2;
  t2.mFrame = *numFrames;
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t2);
  UInt32 numPacketsToRead = t2.mPacket ? t2.mPacket : 1;
  
  AudioBytePacketTranslation t3;
  t3.mPacket = numPacketsToRead;
  size = sizeof(AudioBytePacketTranslation);
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketToByte, &size, &t3);
  
  if (mConverterBuffer) free(mConverterBuffer);
  mConverterBuffer = (char*)malloc(t3.mByte);
  mNumPacketsToRead = numPacketsToRead;
  
  UInt32 outNumBytes;
  checkError(AudioFileReadPackets(mAudioFileID, false, &outNumBytes, mPacketDescs, *mCursor, &numPacketsToRead, mConverterBuffer), "AudioFileReadPackets");
  mConvertByteSize = outNumBytes;
  
  UInt32 numFramesToConvert = t.mFrameOffsetInPacket + *numFrames;
  bool interleaved = true;
  interleaved = !(mClientFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved);
  AudioBufferList* tmpbuf = AudioSourceNode::createAudioBufferList(2, interleaved, numFramesToConvert, sizeof(Float32));
  checkError(AudioConverterFillComplexBuffer(mAudioConverterRef, encoderProc, this, &numFramesToConvert, tmpbuf, NULL),
             "AudioConverterFillComplexBuffer");
  
  if (interleaved) {
    Float32* sample = (Float32*)tmpbuf->mBuffers[0].mData;
    memcpy(data, &sample[t.mFrameOffsetInPacket], numFramesToConvert * sizeof(Float32) * mClientFormat.mChannelsPerFrame);
  }
  
  AudioSourceNode::deleteAudioBufferList(tmpbuf);
  
  if (numFramesToConvert == 0) {
    AudioConverterReset(mAudioConverterRef);
  }
  
  *numFrames = numFramesToConvert;
}
Example #16
File: main.cpp Project: ebakan/SMS
int main (int argc, char * const argv[]) 
{
    char inputFile[]="blip.mp3";

    static const double threshold=0.50;
    int hardware=macbookpro;
    double x,y,z,prev_x,prev_y,prev_z;

    AudioFileID audioFile;

    CFURLRef theURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (UInt8*)inputFile, strlen(inputFile), false);

    
    XThrowIfError (AudioFileOpenURL (theURL, kAudioFileReadPermission, 0, &audioFile), "AudioFileOpenURL");
		    
		    // get the number of channels of the file
    CAStreamBasicDescription fileFormat;
    UInt32 propsize = sizeof(CAStreamBasicDescription);
    XThrowIfError (AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &propsize, &fileFormat), "AudioFileGetProperty");

// lets set up our playing state now
    AUGraph theGraph;
    CAAudioUnit fileAU;

// this makes the graph, the file AU and sets it all up for playing
    MakeSimpleGraph (theGraph, fileAU, fileFormat, audioFile);
	    

// now we load the file contents up for playback before we start playing
// this has to be done after the AU is initialized, and any time it is reset or uninitialized
    Float64 fileDuration = PrepareFileAU (fileAU, fileFormat, audioFile);
    printf ("file duration: %f secs\n", fileDuration);

    read_sms_real(hardware,&x,&y,&z);
    prev_x=x;
    prev_y=y;
    prev_z=z;
    for(;;) {
	read_sms_real(hardware,&x,&y,&z);
	//printf("x: %f y: %f z: %f\n",x,y,z);
	if(isDelta(threshold,x,y,z,prev_x,prev_y,prev_z))
	    XThrowIfError (AUGraphStart (theGraph), "AUGraphStart");
	prev_x=x;
	prev_y=y;
	prev_z=z;
    }

// sleep until the file is finished
    //usleep ((int)(fileDuration * 1000. * 1000.));

// lets clean up
    XThrowIfError (AUGraphStop (theGraph), "AUGraphStop");
    XThrowIfError (AUGraphUninitialize (theGraph), "AUGraphUninitialize");
    XThrowIfError (AudioFileClose (audioFile), "AudioFileClose");
    XThrowIfError (AUGraphClose (theGraph), "AUGraphClose");
    
    return 0;
}	
Example #17
// _______________________________________________________________________________________
//
SInt64  CAAudioFile::GetNumberFrames() const
{
	AudioFilePacketTableInfo pti;
	UInt32 propertySize = sizeof(pti);
	OSStatus err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
	if (err == noErr)
		return pti.mNumberValidFrames;
	return mFileDataFormat.mFramesPerPacket * GetNumberPackets() - mFrame0Offset;
}
Example #18
		OSStatus SetupQueue(BG_FileInfo *inFileInfo)
		{
			UInt32 size = 0;
			OSStatus result = AudioQueueNewOutput(&inFileInfo->mFileFormat, QueueCallback, this, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &mQueue);
					AssertNoError("Error creating queue", end);

			// (2) If the file has a cookie, we should get it and set it on the AQ
			size = sizeof(UInt32);
			result = AudioFileGetPropertyInfo (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, NULL);

			if (!result && size) {
				char* cookie = new char [size];		
				result = AudioFileGetProperty (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, cookie);
					AssertNoError("Error getting magic cookie", end);
				result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_MagicCookie, cookie, size);
				delete [] cookie;
					AssertNoError("Error setting magic cookie", end);
			}

			// channel layout
			OSStatus err = AudioFileGetPropertyInfo(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, NULL);
			if (err == noErr && size > 0) {
				AudioChannelLayout *acl = (AudioChannelLayout *)malloc(size);
				result = AudioFileGetProperty(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, acl);
					AssertNoError("Error getting channel layout from file", end);
				result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_ChannelLayout, acl, size);
				free(acl);
					AssertNoError("Error setting channel layout on queue", end);
			}
			
			// add a notification proc for when the queue stops
			result = AudioQueueAddPropertyListener(mQueue, kAudioQueueProperty_IsRunning, QueueStoppedProc, this);
				AssertNoError("Error adding isRunning property listener to queue", end);
				
			// we need to reset this variable so that if the queue is stopped mid buffer we don't dispose it 
			mMakeNewQueueWhenStopped = false;
			
			// volume
			result = SetVolume(mVolume);
			
		//end:
			return result;
		}
// many encoded formats require a 'magic cookie'. if the file has a cookie we get it
// and configure the queue with it
static void MyCopyEncoderCookieToQueue(AudioFileID theFile, AudioQueueRef queue ) {
	UInt32 propertySize;
	OSStatus result = AudioFileGetPropertyInfo (theFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
	if (result == noErr && propertySize > 0)
	{
		Byte* magicCookie = (UInt8*)malloc(sizeof(UInt8) * propertySize);	
		CheckError(AudioFileGetProperty (theFile, kAudioFilePropertyMagicCookieData, &propertySize, magicCookie), "get cookie from file failed");
		CheckError(AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, propertySize), "set cookie on queue failed");
		free(magicCookie);
	}
}
Example #20
SInt64	HLAudioFile::GetAudioFrameSize() const
{
	ThrowIf(mAudioFileID == 0, CAException(fnOpnErr), "HLAudioFile::GetAudioFrameSize: file isn't prepared");
	
	UInt32 theSize = sizeof(UInt64);
	UInt64 theAnswer = 0;
	OSStatus theError = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataPacketCount, &theSize, &theAnswer);
	ThrowIfError(theError, CAException(theError), "HLAudioFile::GetAudioFrameSize: couldn't get the property");
	
	return theAnswer;
}
Example #21
void AudioFile::loadHeader()
{
#if defined( FLI_MAC )
	OSStatus err = noErr;
	AudioStreamBasicDescription nativeFormatDescription;
	UInt32 size = sizeof( AudioStreamBasicDescription );
	err = AudioFileGetProperty( mNativeFileRef, kAudioFilePropertyDataFormat, &size, &nativeFormatDescription );
	if( err ) {
		std::cout << "error getting file data format" << std::endl;
	}
	mSampleRate = nativeFormatDescription.mSampleRate;
	mNativeFormatID = nativeFormatDescription.mFormatID;
	mNativeFormatFlags = nativeFormatDescription.mFormatFlags;
	mBytesPerPacket = nativeFormatDescription.mBytesPerPacket;
	mFramesPerPacket = nativeFormatDescription.mFramesPerPacket;
	mBytesPerFrame = nativeFormatDescription.mBytesPerFrame;
	mChannelCount = nativeFormatDescription.mChannelsPerFrame;
	mBitsPerSample = nativeFormatDescription.mBitsPerChannel;
	mReserved = nativeFormatDescription.mReserved;
	
	size = sizeof( uint64_t );
	err = AudioFileGetProperty( mNativeFileRef, kAudioFilePropertyAudioDataPacketCount, &size, &mPacketCount );
	if( err ) {
		std::cout << "error getting file packet count" << std::endl;
	}
	
	size = sizeof( uint64_t );
	err = AudioFileGetProperty( mNativeFileRef, kAudioFilePropertyAudioDataByteCount, &size, &mByteCount );
	if( err ) {
		std::cout << "error getting file byte count" << std::endl;
	}
	
	size = sizeof( uint32_t );
	err = AudioFileGetProperty( mNativeFileRef, kAudioFilePropertyMaximumPacketSize, &size, &mMaxPacketSize );
	if( err ) {
		std::cout << "error getting file max packet size count" << std::endl;
	}
#endif
}
const OSStatus AudioFile::setClientFormat(const AudioStreamBasicDescription clientASBD)
{
  checkError(AudioConverterNew(&mInputFormat, &clientASBD, &mAudioConverterRef), "AudioConverterNew");
  mClientFormat = clientASBD;
  
  UInt32 size = sizeof(UInt32);
  UInt32 maxPacketSize;
  checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "AudioFileGetProperty");
  
  if (mIsVBR) {
    mPacketDescs = (AudioStreamPacketDescription*)calloc(mNumPacketsToRead, sizeof(AudioStreamPacketDescription));
  }
  
  // set magic cookie to the AudioConverter
  {
    UInt32 cookieSize;
    OSStatus err = AudioFileGetPropertyInfo(mAudioFileID, 
                                            kAudioFilePropertyMagicCookieData, 
                                            &cookieSize, 
                                            NULL);
    
    if (err == noErr && cookieSize > 0){
      char *magicCookie = (char*)malloc(sizeof(UInt8) * cookieSize);
      UInt32	magicCookieSize = cookieSize;
      AudioFileGetProperty(mAudioFileID,
                           kAudioFilePropertyMagicCookieData,
                           &magicCookieSize,
                           magicCookie);
      
      AudioConverterSetProperty(mAudioConverterRef,
                                kAudioConverterDecompressionMagicCookie,
                                magicCookieSize,
                                magicCookie);
      free(magicCookie);
    }
  }
  
  return noErr;
}
BEGIN_UGEN_NAMESPACE

#include "ugen_iPhoneAudioFileDiskIn.h"
#include "ugen_NSUtilities.h"

DiskInUGenInternal::DiskInUGenInternal(AudioFileID audioFile, 
									   AudioStreamBasicDescription const& format, 
									   const bool loopFlag, 
									   const double startTime,
									   const UGen::DoneAction doneAction) throw()
:	ProxyOwnerUGenInternal(0, format.mChannelsPerFrame - 1),
	audioFile_(audioFile),
	numChannels_(format.mChannelsPerFrame),
	loopFlag_(loopFlag),
	startTime_(startTime),
	packetCount(0),
	currentPacket(0),
	allocatedBlockSize(0),
	audioData(0),
	numPackets(0),
	bytesPerFrame(format.mBytesPerFrame),
	fileSampleRate(format.mSampleRate),
	reciprocalSampleRate(1.0/fileSampleRate),
	doneAction_(doneAction),
	shouldDeleteValue(doneAction_ == UGen::DeleteWhenDone)
{
	OSStatus result;
	UInt32 dataSize;
	
	if(audioFile_)
	{
		dataSize = sizeof(packetCount);
		result = AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataPacketCount, &dataSize, &packetCount);
		if (result != noErr) 
		{
			printf("DiskIn: error: Could not get packet count: ID=%p err=%d\n", audioFile_, (int)result);
			AudioFileClose(audioFile_);
			audioFile_ = 0;
		}	
		else
		{
			currentPacket = ugen::max(0.0, startTime) * fileSampleRate;
			if(currentPacket >= packetCount)
				currentPacket = 0;
			
			allocatedBlockSize = UGen::getEstimatedBlockSize();
			audioData = malloc(bytesPerFrame * allocatedBlockSize);
		}
	}
}
Example #24
// _______________________________________________________________________________________
//
void	CAAudioFile::InitFileMaxPacketSize()
{
	LOG_FUNCTION("CAAudioFile::InitFileMaxPacketSize", "%p", this);
	UInt32 propertySize = sizeof(UInt32);
	OSStatus err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyMaximumPacketSize,
		&propertySize, &mFileMaxPacketSize);
	if (err) {
		// workaround for 3361377: not all file formats' maximum packet sizes are supported
		if (!mFileDataFormat.IsPCM())
			XThrowIfError(err, "get audio file's maximum packet size");
		mFileMaxPacketSize = mFileDataFormat.mBytesPerFrame;
	}
	AllocateBuffers(true /* okToFail */);
}
Example #25
void	GetFormatFromInputFile (AudioFileID inputFile, CAStreamBasicDescription & inputFormat)
{
	bool doPrint = true;
	UInt32 size;
	XThrowIfError(AudioFileGetPropertyInfo(inputFile,
                                           kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
	UInt32 numFormats = size / sizeof(AudioFormatListItem);
	AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];
    
	XThrowIfError(AudioFileGetProperty(inputFile,
                                       kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
	numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
	if (numFormats == 1) {
        // this is the common case
		inputFormat = formatList[0].mASBD;
	} else {
		if (doPrint) {
			printf ("File has a %d layered data format:\n", (int)numFormats);
			for (unsigned int i = 0; i < numFormats; ++i)
				CAStreamBasicDescription(formatList[i].mASBD).Print();
			printf("\n");
		}
		// now we should look to see which decoders we have on the system
		XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
		UInt32 numDecoders = size / sizeof(OSType);
		OSType *decoderIDs = new OSType [ numDecoders ];
		XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");
		unsigned int i = 0;
		for (; i < numFormats; ++i) {
			OSType decoderID = formatList[i].mASBD.mFormatID;
			bool found = false;
			for (unsigned int j = 0; j < numDecoders; ++j) {
				if (decoderID == decoderIDs[j]) {
					found = true;
					break;
				}
			}
			if (found) break;
		}
		delete [] decoderIDs;
		
		if (i >= numFormats) {
			fprintf (stderr, "Cannot play any of the formats in this file\n");
			throw kAudioFileUnsupportedDataFormatError;
		}
		inputFormat = formatList[i].mASBD;
	}
	delete [] formatList;
}
// Sets the packet table containing information about the number of valid frames in a file and where they begin and end
// for the file types that support this information.
// Calling this function makes sure we write out the priming and remainder details to the destination file	
static void WritePacketTableInfo(AudioConverterRef converter, AudioFileID destinationFileID)
{
    UInt32 isWritable;
    UInt32 dataSize;
    OSStatus error = AudioFileGetPropertyInfo(destinationFileID, kAudioFilePropertyPacketTableInfo, &dataSize, &isWritable);
    if (noErr == error && isWritable) {

        AudioConverterPrimeInfo primeInfo;
        dataSize = sizeof(primeInfo);

        // retrieve the leadingFrames and trailingFrames information from the converter,
        error = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &dataSize, &primeInfo);
        if (noErr == error) {
            // we have some priming information to write out to the destination file
            /* The total number of packets in the file times the frames per packet (or counting each packet's
               frames individually for a variable frames per packet format) minus mPrimingFrames, minus
               mRemainderFrames, should equal mNumberValidFrames.
            */
            AudioFilePacketTableInfo pti;
            dataSize = sizeof(pti);
            error = AudioFileGetProperty(destinationFileID, kAudioFilePropertyPacketTableInfo, &dataSize, &pti);
            if (noErr == error) {
                // there's priming to write out to the file
                UInt64 totalFrames = pti.mNumberValidFrames + pti.mPrimingFrames + pti.mRemainderFrames; // get the total number of frames from the output file
                printf("Total number of frames from output file: %lld\n", totalFrames);
                
                pti.mPrimingFrames = primeInfo.leadingFrames;
                pti.mRemainderFrames = primeInfo.trailingFrames;
                pti.mNumberValidFrames = totalFrames - pti.mPrimingFrames - pti.mRemainderFrames;
            
                error = AudioFileSetProperty(destinationFileID, kAudioFilePropertyPacketTableInfo, sizeof(pti), &pti);
                if (noErr == error) {
                    printf("Writing packet table information to destination file: %ld\n", sizeof(pti));
                    printf("     Total valid frames: %lld\n", pti.mNumberValidFrames);
                    printf("         Priming frames: %ld\n", pti.mPrimingFrames);
                    printf("       Remainder frames: %ld\n", pti.mRemainderFrames);
                } else {
                    printf("Some audio files can't contain packet table information and that's OK\n");
                }
            } else {
                 printf("Getting kAudioFilePropertyPacketTableInfo error: %ld\n", error);
            }
        } else {
            printf("No kAudioConverterPrimeInfo available and that's OK\n");
        }
    } else {
        printf("GetPropertyInfo for kAudioFilePropertyPacketTableInfo error: %ld, isWritable: %ld\n", error, isWritable);
    }
}
Example #27
/* Note: I found this tech note:
 http://developer.apple.com/library/mac/#qa/qa2009/qa1678.html
 I don't know if this applies to us. So far, I haven't noticed the problem,
 so I haven't applied any of the techniques.
 */
static int CoreAudio_seek(Sound_Sample *sample, Uint32 ms)
{
	OSStatus error_result = noErr;	
	Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
	CoreAudioFileContainer* core_audio_file_container = (CoreAudioFileContainer *) internal->decoder_private;
	SInt64 frame_offset = 0;
	AudioStreamBasicDescription	actual_format;
	UInt32 format_size;

	
	/* I'm confused. The Apple documentation says this:
	"Seek position is specified in the sample rate and frame count of the file’s audio data format
	— not your application’s audio data format."
	My interpretation is that I want to get the "actual format of the file and compute the frame offset.
	But when I try that, seeking goes to the wrong place.
	When I use outputFormat, things seem to work correctly.
	I must be misinterpreting the documentation or doing something wrong.
	*/
#if 0 /* not working */
    format_size = sizeof(AudioStreamBasicDescription);
    error_result = AudioFileGetProperty(
		*core_audio_file_container->audioFileID,
		kAudioFilePropertyDataFormat,
		&format_size,
		&actual_format
	);
    if(error_result != noErr)
	{
		sample->flags |= SOUND_SAMPLEFLAG_ERROR;
		BAIL_MACRO("Core Audio: Could not GetProperty for kAudioFilePropertyDataFormat.", 0);
	} /* if */

	// packetIndex = (pos * sampleRate) / framesPerPacket
	//	frame_offset = (SInt64)((ms/1000.0 * actual_format.mSampleRate) / actual_format.mFramesPerPacket);
#else /* seems to work, but I'm confused */
	// packetIndex = (pos * sampleRate) / framesPerPacket
	frame_offset = (SInt64)((ms/1000.0 * core_audio_file_container->outputFormat->mSampleRate) / core_audio_file_container->outputFormat->mFramesPerPacket);	
#endif

	// computed against actual format and not the client format
	error_result = ExtAudioFileSeek(core_audio_file_container->extAudioFileRef, frame_offset);
	if(error_result != noErr)
	{
		sample->flags |= SOUND_SAMPLEFLAG_ERROR;
	}
	
	return(1);
} /* CoreAudio_seek */
Example #28
void
CoreAudio_PlayFile(char *const fileName)
{
    const char *inputFile = fileName;
    pthread_t CAThread;

    /* first time through initialise the mutex */
    if (!fCAInitialised) {
        pthread_mutex_init(&mutexCAAccess, NULL);
        fCAInitialised = TRUE;
    }

    /*  Apparently CoreAudio is not fully reentrant */
    pthread_mutex_lock(&mutexCAAccess);

    /* Open the sound file */
    CFURLRef outInputFileURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault,
                                                                       (const UInt8 *) fileName, strlen(fileName),
                                                                       false);
    if (AudioFileOpenURL(outInputFileURL, kAudioFileReadPermission, 0, &audioFile)) {
        outputf(_("Apple CoreAudio Error, can't find %s\n"), fileName);
        CFRelease(outInputFileURL);
        pthread_mutex_unlock(&mutexCAAccess);
        return;
    }
    CFRelease(outInputFileURL);

    /* Get properties of the file */
    AudioStreamBasicDescription fileFormat;
    UInt32 propsize = sizeof(AudioStreamBasicDescription);
    CoreAudioChkError(AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat,
                                           &propsize, &fileFormat), "AudioFileGetProperty Dataformat",);

    /* Setup sound state */
    AudioUnit fileAU;
    memset(&fileAU, 0, sizeof(AudioUnit));
    memset(&theGraph, 0, sizeof(AUGraph));

    /* Setup a simple output graph and AU */
    CoreAudio_MakeSimpleGraph(&theGraph, &fileAU, &fileFormat, audioFile);

    /* Load the file contents */
    fileDuration = CoreAudio_PrepareFileAU(&fileAU, &fileFormat, audioFile);

    if (pthread_create(&CAThread, 0L, (void *)CoreAudio_PlayFile_Thread, NULL) == 0)
        pthread_detach(CAThread);
    else {
        CoreAudio_ShutDown();
        pthread_mutex_unlock(&mutexCAAccess);
    }
}
Example #29
void	CAAudioFileConverter::ReadCAFInfo()
{
	FSRef fsref;
	AudioFileID afid = 0;
	CAFSourceInfo info;
	UInt32 size;
	OSStatus err;
	
	try {
		XThrowIfError(FSPathMakeRef((UInt8 *)mParams.input.filePath, &fsref, NULL), "couldn't locate input file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdPerm, 0, &afid), "couldn't open input file");
		size = sizeof(AudioFileTypeID);
		XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
		if (info.filetype == kAudioFileCAFType) {
			size = sizeof(info);
			err = AudioFileGetUserData(afid, 'srcI', 0, &size, &info);
			if (!err) {
				// restore the following from the original file info:
				//	filetype
				//	data format
				//	filename
				AudioStreamBasicDescription destfmt;
				ASBD_BtoN((AudioStreamBasicDescription *)info.asbd, &destfmt);
				mParams.output.dataFormat = destfmt;
				mParams.output.fileType = EndianU32_BtoN(info.filetype);
				if (mParams.output.filePath == NULL) {
					int len = strlen(mParams.input.filePath) + strlen(info.filename) + 2;
					char *newname = (char *)malloc(len);	// $$$ leaked
					
					const char *dir = dirname(mParams.input.filePath);
					if (dir && (dir[0] !='.' && dir[1] != '/'))
						sprintf(newname, "%s/%s", dir, info.filename);
					else
						strcpy(newname, info.filename);
					mParams.output.filePath = newname;
					mParams.flags = (mParams.flags & ~kOpt_OverwriteOutputFile) | kOpt_NoSanitizeOutputFormat;
				}
			}
		}
		AudioFileClose(afid);
	}
	catch (...) {
		if (afid)
			AudioFileClose(afid);
		throw;
	}
}
Example #30
SInt64	CAAudioFile::FrameToPacket(SInt64 inFrame) const
{
	AudioFramePacketTranslation trans;
	UInt32 propertySize;

	switch (mFileDataFormat.mFramesPerPacket) {
	case 1:
		return inFrame;
	case 0:
		trans.mFrame = inFrame;
		propertySize = sizeof(trans);
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyFrameToPacket, &propertySize, &trans),
			"packet <-> frame translation unimplemented for format with variable frames/packet");
		return trans.mPacket;
	}
	return inFrame / mFileDataFormat.mFramesPerPacket;
}
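The inverse mapping uses the same translation property in the other direction. CAAudioFile does provide a PacketToFrame() method, but the exact body below is a hedged sketch rather than the verified source, mirroring the structure of FrameToPacket() above:

SInt64	CAAudioFile::PacketToFrame(SInt64 inPacket) const
{
	AudioFramePacketTranslation trans;
	UInt32 propertySize;

	switch (mFileDataFormat.mFramesPerPacket) {
	case 1:
		return inPacket;
	case 0:
		// variable frames per packet: ask the file to translate for us
		trans.mPacket = inPacket;
		propertySize = sizeof(trans);
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketToFrame, &propertySize, &trans),
			"packet <-> frame translation unimplemented for format with variable frames/packet");
		return trans.mFrame;
	}
	return inPacket * mFileDataFormat.mFramesPerPacket;
}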