// Opens the audio file at fileURL and caches its format, size and total
// frame count. Throws/aborts via checkError() on any AudioFile API failure.
AudioFile::AudioFile(CFURLRef fileURL) :
    mAudioFileID(0), mAudioConverterRef(0), mTotalFrames(0), mFileSize(0),
    mIsVBR(false), mNumPacketsToRead(0), mPacketDescs(0), mConverterBuffer(0),
    mCursor(0)
{
    checkError(AudioFileOpenURL(fileURL, kAudioFileReadPermission, NULL, &mAudioFileID), "AudioFileOpenURL");

    // Basic stream description (sample rate, format, packet layout).
    UInt32 size = sizeof(AudioStreamBasicDescription);
    checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, &size, &mInputFormat), "AudioFileGetProperty");

    size = sizeof(UInt64);
    checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, &size, &mFileSize), "AudioFileGetProperty");

    // A zero bytes-per-frame marks a variable-rate format in an ASBD.
    if (mInputFormat.mBytesPerFrame == 0) {
        mIsVBR = true;
    }

    UInt64 totalPackets;
    size = sizeof(UInt64);
    checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataPacketCount, &size, &totalPackets), "AudioFileGetProperty");

    if (!mIsVBR) {
        // CBR path: treat the packet count as the frame count, as the
        // original code did — assumes one frame per packet for CBR data.
        mTotalFrames = totalPackets;
    } else {
        // VBR path: ask the file to translate its packet count into frames.
        AudioFramePacketTranslation translation;
        translation.mPacket = totalPackets;
        size = sizeof(AudioFramePacketTranslation);
        checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketToFrame, &size, &translation), "AudioFileGetProperty");
        mTotalFrames = translation.mFrame;
    }

    // calloc zero-initializes, so the old explicit "*mCursor = 0;" was redundant.
    mCursor = (UInt64*)calloc(1, sizeof(UInt64));

    // BUG FIX: the old message said "Total Packets" but the value printed
    // is mTotalFrames (a frame count).
    std::cout << "Total Frames : " << mTotalFrames << std::endl;
}
bool ofxAudioUnitFilePlayer::setFile(const std::string &filePath) { CFURLRef fileURL; fileURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (const UInt8 *)filePath.c_str(), filePath.length(), NULL); if(_fileID[0]) { AudioFileClose(_fileID[0]); _fileID[0] = NULL; } OSStatus s = AudioFileOpenURL(fileURL, kAudioFileReadPermission, 0, _fileID); CFRelease(fileURL); _primed = false; if(s != noErr) { cout << "Error " << s << " while opening file at " << filePath << endl; return false; } else { // setting the file ID now since it seems to have some overhead. // Doing it now ensures you'll get sound pretty much instantly after // calling play() (subsequent calls don't have the overhead) OFXAU_RET_BOOL(AudioUnitSetProperty(*_unit, kAudioUnitProperty_ScheduledFileIDs, kAudioUnitScope_Global, 0, _fileID, sizeof(_fileID)), "setting file player's file ID"); } }
// ---------------------------------------------------------- bool ofxAudioUnitFilePlayer::setFile(std::string filePath) // ---------------------------------------------------------- { CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (const UInt8 *)filePath.c_str(), filePath.length(), NULL); if(fileID[0]) AudioFileClose(fileID[0]); OSStatus s = AudioFileOpenURL(fileURL, kAudioFileReadPermission, 0, fileID); CFRelease(fileURL); if(s != noErr) { if(s == fnfErr) { cout << "File not found : " << filePath << endl; } else { cout << "Error " << s << " while opening file at " << filePath << endl; } return false; } UInt64 numPackets = 0; UInt32 dataSize = sizeof(numPackets); AudioFileGetProperty(fileID[0], kAudioFilePropertyAudioDataPacketCount, &dataSize, &numPackets); AudioStreamBasicDescription asbd = {0}; dataSize = sizeof(asbd); AudioFileGetProperty(fileID[0], kAudioFilePropertyDataFormat, &dataSize, &asbd); // defining a region which basically says "play the whole file" memset(®ion, 0, sizeof(region)); region.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid; region.mTimeStamp.mSampleTime = 0; region.mCompletionProc = NULL; region.mCompletionProcUserData = NULL; region.mAudioFile = fileID[0]; region.mLoopCount = 0; region.mStartFrame = 0; region.mFramesToPlay = numPackets * asbd.mFramesPerPacket; // setting the file ID now since it seems to have some overhead. // Doing it now ensures you'll get sound pretty much instantly after // calling play() return ERR_CHK_BOOL(AudioUnitSetProperty(*_unit, kAudioUnitProperty_ScheduledFileIDs, kAudioUnitScope_Global, 0, fileID, sizeof(fileID)), "setting file player's file ID"); }
//================================================================================================== // Helper functions //================================================================================================== OSStatus OpenFile(const char *inFilePath, AudioFileID &outAFID) { CFURLRef theURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (UInt8*)inFilePath, strlen(inFilePath), false); if (theURL == NULL) return kSoundEngineErrFileNotFound; #if TARGET_OS_IPHONE OSStatus result = AudioFileOpenURL(theURL, kAudioFileReadPermission, 0, &outAFID); #else OSStatus result = AudioFileOpenURL(theURL, fsRdPerm, 0, &outAFID); #endif CFRelease(theURL); AssertNoError("Error opening file", end); end: return result; }
// Plays "blip.mp3" through an AUGraph whenever the Sudden Motion Sensor
// reports movement beyond `threshold` (macbookpro hardware assumed).
int main (int argc, char * const argv[])
{
    char inputFile[]="blip.mp3";
    static const double threshold=0.50;   // motion-delta trigger level
    int hardware=macbookpro;              // SMS hardware id — presumably from the sms library; confirm
    double x,y,z,prev_x,prev_y,prev_z;
    AudioFileID audioFile;

    // Open the sound file read-only.
    CFURLRef theURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (UInt8*)inputFile, strlen(inputFile), false);
    XThrowIfError (AudioFileOpenURL (theURL, kAudioFileReadPermission, 0, &audioFile), "AudioFileOpenURL");

    // get the number of channels of the file
    CAStreamBasicDescription fileFormat;
    UInt32 propsize = sizeof(CAStreamBasicDescription);
    XThrowIfError (AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &propsize, &fileFormat), "AudioFileGetProperty");

    // lets set up our playing state now
    AUGraph theGraph;
    CAAudioUnit fileAU;

    // this makes the graph, the file AU and sets it all up for playing
    MakeSimpleGraph (theGraph, fileAU, fileFormat, audioFile);

    // now we load the file contents up for playback before we start playing;
    // this has to be done after the AU is initialized, and anytime it is
    // reset or uninitialized
    Float64 fileDuration = PrepareFileAU (fileAU, fileFormat, audioFile);
    printf ("file duration: %f secs\n", fileDuration);

    // Seed the previous sample so the first delta comparison is meaningful.
    read_sms_real(hardware,&x,&y,&z);
    prev_x=x; prev_y=y; prev_z=z;

    // NOTE(review): this loop never exits, so the cleanup below is
    // unreachable and AUGraphStart is re-invoked on every motion event —
    // confirm this is intentional (no shutdown path exists).
    for(;;) {
        read_sms_real(hardware,&x,&y,&z);
        //printf("x: %f y: %f z: %f\n",x,y,z);
        if(isDelta(threshold,x,y,z,prev_x,prev_y,prev_z))
            XThrowIfError (AUGraphStart (theGraph), "AUGraphStart");
        prev_x=x; prev_y=y; prev_z=z;
    }

    // sleep until the file is finished
    //usleep ((int)(fileDuration * 1000. * 1000.));

    // lets clean up
    XThrowIfError (AUGraphStop (theGraph), "AUGraphStop");
    XThrowIfError (AUGraphUninitialize (theGraph), "AUGraphUninitialize");
    XThrowIfError (AudioFileClose (audioFile), "AudioFileClose");
    XThrowIfError (AUGraphClose (theGraph), "AUGraphClose");
    return 0;
}
// Streams an audio file through an AudioQueue output. `gain` is the initial
// volume, `loop` repeats the region, and `start`/`end` are handed to
// calculate_seek() to select the packet range to play.
// Throws sys_exception if the file cannot be opened.
music_obj<audio_queue_driver>::music_obj(const std::string& file_path, bool loop, float gain, float start, float end)
    : packet_index_(0)
    , start_packet_index_(0)
    , stop_packet_index_(0)
    , volume_(gain)
    , loop_(loop)
    , is_paused_(false)
{
    CFURLRef file_url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)file_path.c_str(), file_path.size(), false);

    // NOTE(review): kAudioFileCAFType is only a type *hint* here — confirm
    // CAF is really the expected container for every caller.
    OSStatus res = AudioFileOpenURL(file_url, kAudioFileReadPermission, kAudioFileCAFType, &audio_file_);
    CFRelease(file_url);

    if(res) {
        throw sys_exception("audio_queue_driver: couldn't open audio file at '" + file_path + "'");
    }

    UInt32 size = sizeof(data_format_);
    AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);

    // Output queue feeding buffers via buffer_cb; playback start/stop is
    // observed through the IsRunning listener (playback_cb).
    AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);
    AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);

    if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0) {
        // VBR (or unknown packet size): size reads by the largest possible
        // packet and allocate per-packet descriptions for the queue.
        size = sizeof(max_packet_size_);
        AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
        if (max_packet_size_ > BUFFER_SIZE_BYTES) {
            // never try to read more than one buffer can hold
            max_packet_size_ = BUFFER_SIZE_BYTES;
        }
        num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
        packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
    } else {
        // CBR: fixed packet size, no packet descriptions needed.
        num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
        packet_descriptions_ = NULL;
    }

    // Propagate the magic cookie (codec setup data) from file to queue.
    // NOTE(review): the GetPropertyInfo result is not checked — if the call
    // fails, `size` keeps its previous value and stale data could be read;
    // confirm the API zeroes `size` on "no cookie" for all file types.
    AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL);
    if (size > 0) {
        char* cookie = (char*)malloc(sizeof(char) * size);
        AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
        AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
        free(cookie);
    }

    // Resolve start/stop packet indices, apply initial volume, and pre-fill
    // the queue's buffers so playback can begin immediately.
    calculate_seek(start, end);
    volume(volume_);
    prime();
}
// Converts the audio file at kInputFileLocation to a 44.1 kHz 16-bit
// big-endian stereo AIFF ("output.aif") using an AudioConverter (driven by
// the Convert() helper defined elsewhere in this project).
int main(int argc, const char *argv[])
{
    MyAudioConverterSettings audioConverterSettings = {0};

    // open the input audio file
    CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, kInputFileLocation, kCFURLPOSIXPathStyle, false);
    CheckResult (AudioFileOpenURL(inputFileURL, kAudioFileReadPermission , 0, &audioConverterSettings.inputFile), "AudioFileOpenURL failed");
    CFRelease(inputFileURL);

    // get the audio data format from the file
    UInt32 propSize = sizeof(audioConverterSettings.inputFormat);
    CheckResult (AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyDataFormat, &propSize, &audioConverterSettings.inputFormat), "couldn't get file's data format");

    // get the total number of packets in the file
    propSize = sizeof(audioConverterSettings.inputFilePacketCount);
    CheckResult (AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyAudioDataPacketCount, &propSize, &audioConverterSettings.inputFilePacketCount), "couldn't get file's packet count");

    // get size of the largest possible packet
    propSize = sizeof(audioConverterSettings.inputFilePacketMaxSize);
    CheckResult(AudioFileGetProperty(audioConverterSettings.inputFile, kAudioFilePropertyMaximumPacketSize, &propSize, &audioConverterSettings.inputFilePacketMaxSize), "couldn't get file's max packet size");

    // define the output format. AudioConverter requires that one of the data
    // formats be LPCM: 44.1 kHz, 16-bit big-endian signed-integer stereo
    // (2 ch x 2 bytes = 4 bytes per frame, 1 frame per packet).
    audioConverterSettings.outputFormat.mSampleRate = 44100.0;
    audioConverterSettings.outputFormat.mFormatID = kAudioFormatLinearPCM;
    audioConverterSettings.outputFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioConverterSettings.outputFormat.mBytesPerPacket = 4;
    audioConverterSettings.outputFormat.mFramesPerPacket = 1;
    audioConverterSettings.outputFormat.mBytesPerFrame = 4;
    audioConverterSettings.outputFormat.mChannelsPerFrame = 2;
    audioConverterSettings.outputFormat.mBitsPerChannel = 16;

    // create output file
    // KEVIN: TODO: this fails if file exists. isn't there an overwrite flag we can use?
    CFURLRef outputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("output.aif"), kCFURLPOSIXPathStyle, false);
    CheckResult (AudioFileCreateWithURL(outputFileURL, kAudioFileAIFFType, &audioConverterSettings.outputFormat, kAudioFileFlags_EraseFile, &audioConverterSettings.outputFile), "AudioFileCreateWithURL failed");
    CFRelease(outputFileURL);

    fprintf(stdout, "Converting...\n");
    Convert(&audioConverterSettings);

    // NOTE(review): nothing in this function jumps here explicitly —
    // presumably the CheckResult macro does `goto cleanup` on error; confirm.
cleanup:
    AudioFileClose(audioConverterSettings.inputFile);
    AudioFileClose(audioConverterSettings.outputFile);
    printf("Done\r");
    return 0;
}
void CoreAudio_PlayFile(char *const fileName) { const char *inputFile = fileName; pthread_t CAThread; /* first time through initialise the mutex */ if (!fCAInitialised) { pthread_mutex_init(&mutexCAAccess, NULL); fCAInitialised = TRUE; } /* Apparently CoreAudio is not fully reentrant */ pthread_mutex_lock(&mutexCAAccess); /* Open the sound file */ CFURLRef outInputFileURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (const UInt8 *) fileName, strlen(fileName), false); if (AudioFileOpenURL(outInputFileURL, kAudioFileReadPermission, 0, &audioFile)) { outputf(_("Apple CoreAudio Error, can't find %s\n"), fileName); return; } /* Get properties of the file */ AudioStreamBasicDescription fileFormat; UInt32 propsize = sizeof(AudioStreamBasicDescription); CoreAudioChkError(AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &propsize, &fileFormat), "AudioFileGetProperty Dataformat",); /* Setup sound state */ AudioUnit fileAU; memset(&fileAU, 0, sizeof(AudioUnit)); memset(&theGraph, 0, sizeof(AUGraph)); /* Setup a simple output graph and AU */ CoreAudio_MakeSimpleGraph(&theGraph, &fileAU, &fileFormat, audioFile); /* Load the file contents */ fileDuration = CoreAudio_PrepareFileAU(&fileAU, &fileFormat, audioFile); if (pthread_create(&CAThread, 0L, (void *)CoreAudio_PlayFile_Thread, NULL) == 0) pthread_detach(CAThread); else { CoreAudio_ShutDown(); pthread_mutex_unlock(&mutexCAAccess); } }
// Command-line AudioQueue file player.
// Usage: play [-v volume] [-t seconds] [-r rate] [-q quality] [-d] file
int main (int argc, const char * argv[])
{
#if TARGET_OS_WIN32
    InitializeQTML(0L);
#endif
    const char *fpath = NULL;
    Float32 volume = 1;        // queue volume
    Float32 duration = -1;     // stop after this many seconds; < 0 = play to end
    Float32 currentTime = 0.0;
    Float32 rate = 0;          // playback rate; 0 = leave rate untouched
    int rQuality = 0;          // time-pitch quality: non-zero = spectral, 0 = time domain
    bool doPrint = false;

    // ---- argument parsing ----
    for (int i = 1; i < argc; ++i) {
        const char *arg = argv[i];
        if (arg[0] != '-') {
            if (fpath != NULL) {
                fprintf(stderr, "may only specify one file to play\n");
                usage();
            }
            fpath = arg;
        } else {
            arg += 1;
            if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%f", &volume);
            } else if (arg[0] == 't' || !strcmp(arg, "-time")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%f", &duration);
            } else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%f", &rate);
            } else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%d", &rQuality);
            } else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
                usage();
            } else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
                doPrint = true;
            } else {
                fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
                usage();
            }
        }
    }

    if (fpath == NULL)
        usage();

    if (doPrint)
        printf ("Playing file: %s\n", fpath);

    try {
        AQTestInfo myInfo;

        // ---- open the file ----
        CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
        if (!sndFile) XThrowIfError (!sndFile, "can't parse file path");

        OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
        CFRelease (sndFile);

        XThrowIfError(result, "AudioFileOpen failed");

        // ---- determine a data format the system can decode ----
        UInt32 size;
        XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile,
                                               kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
        UInt32 numFormats = size / sizeof(AudioFormatListItem);
        AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

        XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile,
                                           kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
        // we need to reassess the actual number of formats when we get it
        numFormats = size / sizeof(AudioFormatListItem);
        if (numFormats == 1) {
            // this is the common case
            myInfo.mDataFormat = formatList[0].mASBD;

            // see if there is a channel layout (multichannel file)
            result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
            if (result == noErr && myInfo.mChannelLayoutSize > 0) {
                myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
                XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
            }
        } else {
            if (doPrint) {
                printf ("File has a %d layered data format:\n", (int)numFormats);
                for (unsigned int i = 0; i < numFormats; ++i)
                    CAStreamBasicDescription(formatList[i].mASBD).Print();
            }
            // now we should look to see which decoders we have on the system
            XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
            UInt32 numDecoders = size / sizeof(OSType);
            OSType *decoderIDs = new OSType [ numDecoders ];
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");

            // pick the first format layer we actually have a decoder for
            unsigned int i = 0;
            for (; i < numFormats; ++i) {
                OSType decoderID = formatList[i].mASBD.mFormatID;
                bool found = false;
                for (unsigned int j = 0; j < numDecoders; ++j) {
                    if (decoderID == decoderIDs[j]) {
                        found = true;
                        break;
                    }
                }
                if (found) break;
            }
            delete [] decoderIDs;

            if (i >= numFormats) {
                fprintf (stderr, "Cannot play any of the formats in this file\n");
                throw kAudioFileUnsupportedDataFormatError;
            }
            myInfo.mDataFormat = formatList[i].mASBD;
            // synthesize a channel layout from the chosen layer's layout tag
            myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
            myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
            myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
            myInfo.mChannelLayout->mChannelBitmap = 0;
            myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
        }
        delete [] formatList;

        if (doPrint) {
            printf ("Playing format: ");
            myInfo.mDataFormat.Print();
        }

        // ---- create the output queue on this run loop ----
        XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo,
                                          CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed");

        UInt32 bufferByteSize;
        // we need to calculate how many packets we read at a time, and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);

            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile,
                                               kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");

            // adjust buffer size to represent about a half second of audio based on this format
            CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);

            if (isFormatVBR)
                myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
            else
                myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)

            if (doPrint)
                printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
        }

        // (2) If the file has a cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char [size];
            XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

        // set ACL if there is one
        if (myInfo.mChannelLayout)
            XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

        // prime the queue with some data before starting
        myInfo.mDone = false;
        myInfo.mCurrentPacket = 0;
        for (int i = 0; i < kNumberBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");
            // invoke the callback by hand to fill the buffer and enqueue it
            AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);
            if (myInfo.mDone) break;
        }

        // set the volume of the queue
        XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");

        XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");

#if !TARGET_OS_IPHONE
        if (rate > 0) {
            // enable the time-pitch unit and select its algorithm
            UInt32 propValue = 1;
            XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");

            propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
            XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");

            propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
            XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
            if (rate != 1) {
                XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
            }

            if (doPrint) {
                printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
            }
        }
#endif

        // lets start playing now - stop is called in the AQTestBufferCallback when there's
        // no more to read from the file
        XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

        do {
            // pump the run loop (which services the AQ callback) in 1/4 s slices
            CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
            currentTime += .25;
            if (duration > 0 && currentTime >= duration)
                break;
        } while (gIsRunning);

        // give the queue one more second to drain before disposing
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

        XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
        XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed");
    }
    catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "Unspecified exception\n");
    }

    return 0;
}
static bool LoadWAVFile(const char* filename, ALenum* format, ALvoid** data, ALsizei* size, ALsizei* freq, Float64* estimatedDurationOut) { CFStringRef filenameStr = CFStringCreateWithCString( NULL, filename, kCFStringEncodingUTF8 ); CFURLRef url = CFURLCreateWithFileSystemPath( NULL, filenameStr, kCFURLPOSIXPathStyle, false ); CFRelease( filenameStr ); AudioFileID audioFile; OSStatus error = AudioFileOpenURL( url, kAudioFileReadPermission, kAudioFileWAVEType, &audioFile ); CFRelease( url ); if ( error != noErr ) { fprintf( stderr, "Error opening audio file. %d\n", error ); return false; } AudioStreamBasicDescription basicDescription; UInt32 propertySize = sizeof(basicDescription); error = AudioFileGetProperty( audioFile, kAudioFilePropertyDataFormat, &propertySize, &basicDescription ); if ( error != noErr ) { fprintf( stderr, "Error reading audio file basic description. %d\n", error ); AudioFileClose( audioFile ); return false; } if ( basicDescription.mFormatID != kAudioFormatLinearPCM ) { // Need PCM for Open AL. WAVs are (I believe) by definition PCM, so this check isn't necessary. It's just here // in case I ever use this with another audio format. fprintf( stderr, "Audio file is not linear-PCM. %d\n", basicDescription.mFormatID ); AudioFileClose( audioFile ); return false; } UInt64 audioDataByteCount = 0; propertySize = sizeof(audioDataByteCount); error = AudioFileGetProperty( audioFile, kAudioFilePropertyAudioDataByteCount, &propertySize, &audioDataByteCount ); if ( error != noErr ) { fprintf( stderr, "Error reading audio file byte count. %d\n", error ); AudioFileClose( audioFile ); return false; } Float64 estimatedDuration = 0; propertySize = sizeof(estimatedDuration); error = AudioFileGetProperty( audioFile, kAudioFilePropertyEstimatedDuration, &propertySize, &estimatedDuration ); if ( error != noErr ) { fprintf( stderr, "Error reading estimated duration of audio file. 
%d\n", error ); AudioFileClose( audioFile ); return false; } ALenum alFormat = 0; if ( basicDescription.mChannelsPerFrame == 1 ) { if ( basicDescription.mBitsPerChannel == 8 ) alFormat = AL_FORMAT_MONO8; else if ( basicDescription.mBitsPerChannel == 16 ) alFormat = AL_FORMAT_MONO16; else { fprintf( stderr, "Expected 8 or 16 bits for the mono channel but got %d\n", basicDescription.mBitsPerChannel ); AudioFileClose( audioFile ); return false; } } else if ( basicDescription.mChannelsPerFrame == 2 ) { if ( basicDescription.mBitsPerChannel == 8 ) alFormat = AL_FORMAT_STEREO8; else if ( basicDescription.mBitsPerChannel == 16 ) alFormat = AL_FORMAT_STEREO16; else { fprintf( stderr, "Expected 8 or 16 bits per channel but got %d\n", basicDescription.mBitsPerChannel ); AudioFileClose( audioFile ); return false; } } else { fprintf( stderr, "Expected 1 or 2 channels in audio file but got %d\n", basicDescription.mChannelsPerFrame ); AudioFileClose( audioFile ); return false; } UInt32 numBytesToRead = audioDataByteCount; void* buffer = malloc( numBytesToRead ); if ( buffer == NULL ) { fprintf( stderr, "Error allocating buffer for audio data of size %u\n", numBytesToRead ); return false; } error = AudioFileReadBytes( audioFile, false, 0, &numBytesToRead, buffer ); AudioFileClose( audioFile ); if ( error != noErr ) { fprintf( stderr, "Error reading audio bytes. %d\n", error ); free(buffer); return false; } if ( numBytesToRead != audioDataByteCount ) { fprintf( stderr, "Tried to read %lld bytes from the audio file but only got %d bytes\n", audioDataByteCount, numBytesToRead ); free(buffer); return false; } *freq = basicDescription.mSampleRate; *size = audioDataByteCount; *format = alFormat; *data = buffer; *estimatedDurationOut = estimatedDuration; return true; }
// Decodes the audio file at sourceURL through an *offline* AudioQueue
// (no hardware output) and writes the rendered PCM to destinationURL as a
// 16-bit little-endian CAF via ExtAudioFile. Errors are reported through
// CAXException; resources are released at the end of the try block.
void DoAQOfflineRender(CFURLRef sourceURL, CFURLRef destinationURL)
{
    // main audio queue code
    try {
        AQTestInfo myInfo;

        myInfo.mDone = false;
        myInfo.mFlushed = false;
        myInfo.mCurrentPacket = 0;

        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile), "AudioFileOpen failed");

        UInt32 size = sizeof(myInfo.mDataFormat);
        XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat), "couldn't get file's data format");

        printf ("File format: "); myInfo.mDataFormat.Print();

        // create a new audio queue output
        XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat,      // The data format of the audio to play. For linear PCM, only interleaved formats are supported.
                                          AQTestBufferCallback,     // A callback function to use with the playback audio queue.
                                          &myInfo,                  // A custom data structure for use with the callback function.
                                          CFRunLoopGetCurrent(),    // The event loop on which the callback function pointed to by the inCallbackProc parameter is to be called.
                                                                    // If you specify NULL, the callback is invoked on one of the audio queue’s internal threads.
                                          kCFRunLoopCommonModes,    // The run loop mode in which to invoke the callback function specified in the inCallbackProc parameter.
                                          0,                        // Reserved for future use. Must be 0.
                                          &myInfo.mQueue),          // On output, the newly created playback audio queue object.
                                          "AudioQueueNew failed");

        UInt32 bufferByteSize;

        // we need to calculate how many packets we read at a time and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);

            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");

            // adjust buffer size to represent about a second of audio based on this format
            CalculateBytesForTime(myInfo.mDataFormat, maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);

            if (isFormatVBR) {
                myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
            } else {
                myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }

            printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
        }

        // if the file has a magic cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        OSStatus result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char [size];
            XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

        // channel layout?
        OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, NULL);
        AudioChannelLayout *acl = NULL;
        if (err == noErr && size > 0) {
            acl = (AudioChannelLayout *)malloc(size);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
            XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
        }

        //allocate the input read buffer
        XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffer), "AudioQueueAllocateBuffer");

        // prepare a canonical interleaved capture format
        CAStreamBasicDescription captureFormat;
        captureFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
        captureFormat.SetAUCanonical(myInfo.mDataFormat.mChannelsPerFrame, true); // interleaved
        XThrowIfError(AudioQueueSetOfflineRenderFormat(myInfo.mQueue, &captureFormat, acl), "set offline render format");

        ExtAudioFileRef captureFile;

        // prepare a 16-bit int file format, sample channel count and sample rate
        CAStreamBasicDescription dstFormat;
        dstFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
        dstFormat.mChannelsPerFrame = myInfo.mDataFormat.mChannelsPerFrame;
        dstFormat.mFormatID = kAudioFormatLinearPCM;
        dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        dstFormat.mBitsPerChannel = 16;
        dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
        dstFormat.mFramesPerPacket = 1;

        // create the capture file
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");

        // set the capture file's client format to be the canonical format from the queue;
        // ExtAudioFile converts from captureFormat to dstFormat on write
        XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");

        // allocate the capture buffer, just keep it at half the size of the enqueue buffer
        // we don't ever want to pull any faster than we can push data in for render
        // this 2:1 ratio keeps the AQ Offline Render happy
        const UInt32 captureBufferByteSize = bufferByteSize / 2;

        AudioQueueBufferRef captureBuffer;
        AudioBufferList captureABL;

        XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, captureBufferByteSize, &captureBuffer), "AudioQueueAllocateBuffer");

        captureABL.mNumberBuffers = 1;
        captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
        captureABL.mBuffers[0].mNumberChannels = captureFormat.mChannelsPerFrame;

        // lets start playing now - stop is called in the AQTestBufferCallback when there's
        // no more to read from the file
        XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

        AudioTimeStamp ts;
        ts.mFlags = kAudioTimeStampSampleTimeValid;
        ts.mSampleTime = 0;

        // we need to call this once asking for 0 frames
        XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, 0), "AudioQueueOfflineRender");

        // we need to enqueue a buffer after the queue has started
        AQTestBufferCallback(&myInfo, myInfo.mQueue, myInfo.mBuffer);

        while (true) {
            UInt32 reqFrames = captureBufferByteSize / captureFormat.mBytesPerFrame;

            XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, reqFrames), "AudioQueueOfflineRender");

            captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
            captureABL.mBuffers[0].mDataByteSize = captureBuffer->mAudioDataByteSize;
            UInt32 writeFrames = captureABL.mBuffers[0].mDataByteSize / captureFormat.mBytesPerFrame;

            // NOTE(review): "%ld" is used for writeFrames, which is a UInt32 —
            // technically a format-specifier mismatch on LP64; confirm/fix.
            printf("t = %.f: AudioQueueOfflineRender: req %d fr/%d bytes, got %ld fr/%d bytes\n", ts.mSampleTime, (int)reqFrames, (int)captureBufferByteSize, writeFrames, (int)captureABL.mBuffers[0].mDataByteSize);

            XThrowIfError(ExtAudioFileWrite(captureFile, writeFrames, &captureABL), "ExtAudioFileWrite");

            // mFlushed is set by the callback once the file is exhausted and
            // the queue has been flushed — everything rendered is now written.
            if (myInfo.mFlushed) break;

            ts.mSampleTime += writeFrames;
        }

        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

        XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
        XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed");
        XThrowIfError(ExtAudioFileDispose(captureFile), "ExtAudioFileDispose failed");

        if (myInfo.mPacketDescs) delete [] myInfo.mPacketDescs;
        if (acl) free(acl);
    }
    catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    return;
}
/*
 * Entry point: opens a hard-coded audio file, creates an output AudioQueue
 * driven by AQTestBufferCallback, primes three small buffers, and spins the
 * main run loop until the kAudioQueueProperty_IsRunning listener stops it.
 *
 * Returns 0 on success, 1 on any Audio File / Audio Queue setup error.
 * NOTE(review): all playback state lives in the file-scope `myInfo` struct.
 */
int main (int argc, const char *argv[]) {
    CFURLRef audioFileURLRef;
    OSStatus ret;

    /* Path is hard-coded relative to the working directory. */
    audioFileURLRef = CFURLCreateWithFileSystemPath (
        kCFAllocatorDefault,
        /* CFSTR ("../misc/test.wav"), */
        CFSTR ("../misc/alin.wav"),
        kCFURLPOSIXPathStyle,
        FALSE
    );

    /* NOTE(review): read/write permission is requested although the file is
     * only played back; kAudioFileReadPermission would suffice and avoids
     * failing on read-only media. */
    ret = AudioFileOpenURL( audioFileURLRef, kAudioFileReadWritePermission, 0, &myInfo.mAudioFile);
    if (ret != noErr) {
        printf("fail to open audio file %x\n", ret);
        return 1;
    }

    /* Read the file's data format (ASBD) so the queue can match it. */
    UInt32 propSize = sizeof(myInfo.mDataFormat);
    ret = AudioFileGetProperty( myInfo.mAudioFile, kAudioFilePropertyDataFormat, &propSize, &myInfo.mDataFormat );
    if (ret != noErr) {
        printf("AudioFileGetProperty error code %d\n", ret);
        return 1;
    }

    printf("sample rate: %f\n"
           "mFormatID: %u\n"
           "mFormatFlags: %u\n"
           "mBytesPerPacket: %u\n"
           "mChannelsPerFrame: %u\n",
           myInfo.mDataFormat.mSampleRate,
           myInfo.mDataFormat.mFormatID,
           myInfo.mDataFormat.mFormatFlags,
           myInfo.mDataFormat.mBytesPerPacket,
           myInfo.mDataFormat.mChannelsPerFrame
    );

    /* Instantiate an audio queue object; AQTestBufferCallback will be invoked
     * on this thread's run loop to refill buffers. */
    ret = AudioQueueNewOutput( &myInfo.mDataFormat, AQTestBufferCallback, &myInfo,
                               CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue );
    if (ret != noErr) {
        printf("AudioQueueNewOutput error code %d\n", ret);
        return 1;
    }

    /* Listener is expected to stop the run loop when playback finishes. */
    AudioQueueAddPropertyListener(myInfo.mQueue, kAudioQueueProperty_IsRunning,
                                  AudioEnginePropertyListenerProc, &myInfo);

    /* FIXME allocate AudioQueue buffer */
    /* NOTE(review): 441 * 4 bytes == 441 stereo 16-bit frames, i.e. 10 ms at
     * 44.1 kHz -- a tiny, format-specific size; should be derived from the
     * ASBD rather than hard-coded. Return values are unchecked here. */
    int i;
    for (i = 0; i < 3; i++) {
        AudioQueueAllocateBuffer(myInfo.mQueue, 441 * 4, &myInfo.mBuffers[i]);
    }

    AudioQueueStart(myInfo.mQueue, NULL);

    printf("Run loop\n");

    // create a system sound ID to represent the sound file
    /* OSStatus error = AudioServicesCreateSystemSoundID (myURLRef, &mySSID); */

    // Register the sound completion callback.
    // Again, useful when you need to free memory after playing.
    /* AudioServicesAddSystemSoundCompletion ( */
    /*     mySSID, */
    /*     NULL, */
    /*     NULL, */
    /*     MyCompletionCallback, */
    /*     (void *) myURLRef */
    /* ); */

    // Play the sound file.
    /* AudioServicesPlaySystemSound (mySSID); */

    // Invoke a run loop on the current thread to keep the application
    // running long enough for the sound to play; the sound completion
    // callback later stops this run loop.
    CFRunLoopRun ();

    return 0;
}
// Opens the audio file at `url` and fully configures the playback AudioQueue
// held in `aqData`: reads the data format, creates the queue, derives buffer
// sizes (~0.5 s of audio), allocates VBR packet descriptions when needed, and
// copies the magic cookie (codec configuration) from the file to the queue.
//
// Returns true on success, false on any failure. On failure, resources
// acquired so far (file, queue) are released before returning -- previously
// only the initial open was checked and later failures were merely logged by
// checkStatus() while setup continued with an unusable queue/format.
bool load(CFURLRef url)
{
    OSStatus status;
    memset(&aqData, 0, sizeof(aqData));
    timeBase = 0;

    // Open the file read-only.
    status = AudioFileOpenURL(url, kAudioFileReadPermission, 0, &aqData.mAudioFile);
    checkStatus(status);
    if (status != noErr)
        return false;

    // Ask the file for its data format (ASBD).
    UInt32 dataFormatSize = sizeof(aqData.mDataFormat);
    status = AudioFileGetProperty(aqData.mAudioFile,
                                  kAudioFilePropertyDataFormat,
                                  &dataFormatSize,
                                  &aqData.mDataFormat);
    checkStatus(status);
    if (status != noErr) {
        AudioFileClose(aqData.mAudioFile);
        return false;
    }

    // Create the output queue; HandleOutputBuffer refills buffers on this
    // thread's run loop.
    status = AudioQueueNewOutput(&aqData.mDataFormat,
                                 HandleOutputBuffer,
                                 &aqData,
                                 CFRunLoopGetCurrent(),
                                 kCFRunLoopCommonModes,
                                 0,
                                 &aqData.mQueue);
    checkStatus(status);
    if (status != noErr) {
        AudioFileClose(aqData.mAudioFile);
        return false;
    }

    // Theoretical upper bound of a packet -- cheap to query (no full scan),
    // used to size the queue buffers.
    UInt32 maxPacketSize;
    UInt32 propertySize = sizeof(maxPacketSize);
    status = AudioFileGetProperty(aqData.mAudioFile,
                                  kAudioFilePropertyPacketSizeUpperBound,
                                  &propertySize,
                                  &maxPacketSize);
    checkStatus(status);
    if (status != noErr) {
        AudioQueueDispose(aqData.mQueue, true);
        AudioFileClose(aqData.mAudioFile);
        return false;
    }

    // Size buffers for roughly half a second of audio.
    deriveBufferSize(aqData.mDataFormat,
                     maxPacketSize,
                     0.5,
                     &aqData.bufferByteSize,
                     &aqData.mNumPacketsToRead);

    // VBR formats report 0 bytes/frames per packet and need per-packet
    // descriptions; CBR formats (e.g. linear PCM) do not.
    bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 ||
                        aqData.mDataFormat.mFramesPerPacket == 0);
    if (isFormatVBR) {
        aqData.mPacketDescs = (AudioStreamPacketDescription*)
            malloc(aqData.mNumPacketsToRead * sizeof(AudioStreamPacketDescription));
        if (aqData.mPacketDescs == NULL) {
            AudioQueueDispose(aqData.mQueue, true);
            AudioFileClose(aqData.mAudioFile);
            return false;
        }
    } else {
        aqData.mPacketDescs = NULL;
    }

    // If the file carries a magic cookie (codec configuration), hand it to
    // the queue before playback. Absence of a cookie is not an error.
    UInt32 cookieSize = sizeof(UInt32);
    OSStatus couldNotGetProperty =
        AudioFileGetPropertyInfo(aqData.mAudioFile,
                                 kAudioFilePropertyMagicCookieData,
                                 &cookieSize,
                                 NULL);
    if (!couldNotGetProperty && cookieSize) {
        char* magicCookie = (char *)malloc(cookieSize);
        if (magicCookie != NULL) {
            status = AudioFileGetProperty(aqData.mAudioFile,
                                          kAudioFilePropertyMagicCookieData,
                                          &cookieSize,
                                          magicCookie);
            checkStatus(status);
            if (status == noErr) {
                // Only push a cookie we actually read successfully.
                status = AudioQueueSetProperty(aqData.mQueue,
                                               kAudioQueueProperty_MagicCookie,
                                               magicCookie,
                                               cookieSize);
                checkStatus(status);
            }
            free(magicCookie);
        }
    }
    return true;
}
/*
 * Entry point: plays kPlaybackFileLocation through an output AudioQueue.
 * Sequence: open file -> read format -> create queue -> size buffers ->
 * allocate VBR packet descriptions -> copy magic cookie -> prime buffers by
 * invoking the callback manually -> start -> pump the run loop until the
 * file has been fully enqueued -> linger so the in-flight buffers drain.
 * Returns 0 (errors abort via CheckError).
 */
int main(int argc, const char *argv[])
{
    MyPlayer player = {0};

    CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                                       kPlaybackFileLocation,
                                                       kCFURLPOSIXPathStyle,
                                                       false);

    // open the audio file
    // CheckError(AudioFileOpenURL(myFileURL, fsRdPerm, 0, &player.playbackFile), "AudioFileOpenURL failed");
    CheckError(AudioFileOpenURL(myFileURL, kAudioFileReadPermission, 0, &player.playbackFile), "AudioFileOpenURL failed");
    CFRelease(myFileURL);

    // get the audio data format from the file
    AudioStreamBasicDescription dataFormat;
    UInt32 propSize = sizeof(dataFormat);
    CheckError(AudioFileGetProperty(player.playbackFile, kAudioFilePropertyDataFormat,
                                    &propSize, &dataFormat),
               "couldn't get file's data format");

    // create a output (playback) queue
    // NOTE(review): NULL run loop => callbacks fire on an internal AQ thread.
    AudioQueueRef queue;
    CheckError(AudioQueueNewOutput(&dataFormat, // ASBD
                                   MyAQOutputCallback, // Callback
                                   &player, // user data
                                   NULL, // run loop
                                   NULL, // run loop mode
                                   0, // flags (always 0)
                                   &queue), // output: reference to AudioQueue object
               "AudioQueueNewOutput failed");

    // adjust buffer size to represent about a half second (0.5) of audio based on this format
    UInt32 bufferByteSize;
    CalculateBytesForTime(player.playbackFile, dataFormat, 0.5, &bufferByteSize, &player.numPacketsToRead);

    // check if we are dealing with a VBR file. ASBDs for VBR files always have
    // mBytesPerPacket and mFramesPerPacket as 0 since they can fluctuate at any time.
    // If we are dealing with a VBR file, we allocate memory to hold the packet descriptions
    bool isFormatVBR = (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0);
    if (isFormatVBR)
        player.packetDescs = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * player.numPacketsToRead);
    else
        player.packetDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)

    // get magic cookie from file and set on queue
    MyCopyEncoderCookieToQueue(player.playbackFile, queue);

    // allocate the buffers and prime the queue with some data before starting
    AudioQueueBufferRef buffers[kNumberPlaybackBuffers];
    player.isDone = false;
    player.packetPosition = 0;
    int i;
    for (i = 0; i < kNumberPlaybackBuffers; ++i)
    {
        CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffers[i]),
                   "AudioQueueAllocateBuffer failed");

        // manually invoke callback to fill buffers with data
        MyAQOutputCallback(&player, queue, buffers[i]);

        // EOF (the entire file's contents fit in the buffers)
        if (player.isDone)
            break;
    }

    //CheckError(AudioQueueAddPropertyListener(aqp.queue, kAudioQueueProperty_IsRunning, MyAQPropertyListenerCallback, &aqp), "AudioQueueAddPropertyListener(kAudioQueueProperty_IsRunning) failed");

    // start the queue. this function returns immedatly and begins
    // invoking the callback, as needed, asynchronously.
    CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");

    // and wait
    printf("Playing...\n");
    do
    {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
    } while (!player.isDone /*|| gIsRunning*/);

    // isDone represents the state of the Audio File enqueuing. This does not mean the
    // Audio Queue is actually done playing yet. Since we have 3 half-second buffers
    // in-flight, continue to run for a short additional time so they can be processed.
    CFRunLoopRunInMode(kCFRunLoopDefaultMode, 2, false);

    // end playback
    player.isDone = true;
    CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");

cleanup: /* NOTE(review): label is never targeted by a goto -- CheckError presumably exits */
    AudioQueueDispose(queue, TRUE);
    AudioFileClose(player.playbackFile);

    return 0;
}
// Converts the audio file at `sourceURL` into a CAF file at `destinationURL`
// using AudioConverterFillComplexBuffer.
//  - outputFormat:     destination codec (e.g. kAudioFormatLinearPCM, AAC)
//  - outputSampleRate: 0 means "keep the source sample rate"
//  - outputBitRate:    0 means "default to 192 kbps" (applied for AAC only)
// Handles both CBR and VBR source/destination formats, copies magic cookies,
// writes channel layouts for >2 channels, and is prepared to stop cleanly if
// the converter cannot resume after an audio-session interruption.
// Returns noErr on success or the failing OSStatus; all resources are
// released in the cleanup section regardless of outcome.
// NOTE(review): Objective-C++ -- uses NSThread and CAXException helpers.
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
    AudioFileID sourceFileID = 0;
    AudioFileID destinationFileID = 0;
    AudioConverterRef converter = NULL;
    Boolean canResumeFromInterruption = true; // we can continue unless told otherwise

    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};

    char *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;

    OSStatus error = noErr;

    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);

    // transition thread state to kStateRunning before continuing
    printf("\nDoConvertFile\n");

    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");

        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PC create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1

            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }

        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();

        // create the AudioConverter
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");

        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf(" Source format: "); srcFormat.Print();
        printf(" Destination File format: "); dstFormat.Print();

        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        // but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        // and if you have stereo or more, you can multiply that number by the number of channels.
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }

        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            UInt32 propSize = sizeof(outputBitRate);

            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                          "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");

            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we recieved a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            if (0 == canResume) canResumeFromInterruption = false;

            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                // NOTE(review): %ld with OSStatus assumes 32-bit long -- verify on LP64
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }

            error = noErr;
        }

        // create the destination file
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;

        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");

            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;

            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];

        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");

            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debgging printf
        SInt64 outputFilePos = 0;

        // loop to convert data
        printf("Converting...\n");

        while (1) {
            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;

            // this will block if we're interrupted
            // NOTE(review): wasInterrupted is always NO here -- presumably the
            // full sample polled an interruption flag; confirm against original.
            Boolean wasInterrupted = NO;

            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occured but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);

            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                    printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF conditon
                    error = noErr;
                    break;
                }
            }

            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");

                // NOTE(review): %lu/%ld with UInt32 assumes 32-bit long
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jive with
                printf("Total number of output frames counted: %lld\n", totalOutputFrames);
                WritePacketTableInfo(converter, destinationFileID);
            }

            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
    }

    // cleanup -- safe to run unconditionally: everything was initialized to
    // 0/NULL up front, so only successfully-acquired resources are released
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
    if (sourceFileID) AudioFileClose(sourceFileID);
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;

    return error;
}
static MADErr mainAIFF(void *unused, OSType order, InstrData *InsHeader, sData **sample, short *sampleID, CFURLRef AlienFileURL, PPInfoPlug *thePPInfoPlug) { MADErr myErr = MADNoErr; //char *AlienFile; switch(order) { #if 0 case 'IMPL': { char *theSound; long lS, lE; short sS; unsigned long rate; Boolean stereo; FSSpec newFile; myErr = ConvertDataToWAVE(*AlienFileFSSpec, &newFile, thePPInfoPlug); if (myErr == MADNoErr) { theSound = ConvertWAV(&newFile, &lS, &lE, &sS, &rate, &stereo); if (theSound) myErr = inAddSoundToMAD(theSound, lS, lE, sS, 60, rate, stereo, AlienFileFSSpec->name, InsHeader, sample, sampleID); else myErr = MADNeedMemory; FSpDelete(&newFile); } } break; #endif case MADPlugImport: { AudioFileID theInID; OSStatus myStat = AudioFileOpenURL(AlienFileURL, kAudioFileReadPermission, 0, &theInID); if (myStat != noErr) { myErr = MADReadingErr; } else { AudioFileClose(theInID); myErr = MADOrderNotImplemented; } } case MADPlugTest: { AudioFileID audioFile; OSStatus res; res = AudioFileOpenURL(AlienFileURL, kAudioFileReadPermission, kAudioFileAIFFType, &audioFile); if (res != noErr) { res = AudioFileOpenURL(AlienFileURL, kAudioFileReadPermission, kAudioFileAIFCType, &audioFile); if (res != noErr) { myErr = MADFileNotSupportedByThisPlug; } else { AudioFileClose(audioFile); } } else { AudioFileClose(audioFile); } } break; case MADPlugExport: if (*sampleID >= 0) { char* data = NULL; sData *curData = sample[*sampleID]; AudioStreamBasicDescription asbd = {0}; asbd.mSampleRate = curData->c2spd; asbd.mFormatID = kAudioFormatLinearPCM; asbd.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagIsBigEndian; asbd.mBitsPerChannel = curData->amp; asbd.mChannelsPerFrame = curData->stereo ? 
2 : 1; asbd.mFramesPerPacket = 1; asbd.mBytesPerFrame = asbd.mBitsPerChannel * asbd.mChannelsPerFrame / 8; asbd.mBytesPerPacket = asbd.mBytesPerFrame * asbd.mFramesPerPacket; AudioFileID audioFile; OSStatus res; if (curData->amp == 16) { data = malloc(curData->size); if (!data) return MADNeedMemory; memcpy(data, curData->data, curData->size); dispatch_apply(curData->size / 2, dispatch_get_global_queue(0, 0), ^(size_t i) { PPBE16(&((short*)data)[i]); });
// Constructs a SourceFile from a DataSourceRef that is either a file path or
// a URL. Opens the underlying AudioFile, takes ownership of it (closed via
// shared_ptr deleter), and caches header info: data format, packet count,
// byte count, maximum packet size and estimated duration.
// Throws IoExceptionSourceNotFound (Mac, missing file) or
// IoExceptionFailedLoad on any other failure.
SourceFile::SourceFile( DataSourceRef dataSourceRef )
	: Source()
{
	OSStatus err = noErr;
	// Zero-initialize: previously this was left uninitialized, and a data
	// source that was neither a file path nor a URL fell through to use the
	// garbage handle (undefined behavior).
	AudioFileID aFileRef = 0;
	if( dataSourceRef->isFilePath() ) {
		::CFStringRef pathString = cocoa::createCfString( dataSourceRef->getFilePath() );
		::CFURLRef urlRef = ::CFURLCreateWithFileSystemPath( kCFAllocatorDefault, pathString, kCFURLPOSIXPathStyle, false );
		err = AudioFileOpenURL( urlRef, kAudioFileReadPermission/*fsRdPerm*/, 0, &aFileRef );
		::CFRelease( pathString );
		::CFRelease( urlRef );
		if( err ) {
#if defined(CINDER_MAC)
			//TODO: find iphone equivalent of fnfErr
			if( err == fnfErr ) {
				throw IoExceptionSourceNotFound();
			}
#endif
			throw IoExceptionFailedLoad();
		}
	}
	else if( dataSourceRef->isUrl() ) {
		::CFURLRef urlRef = cocoa::createCfUrl( dataSourceRef->getUrl() );
		err = AudioFileOpenURL( urlRef, kAudioFileReadPermission/*fsRdPerm*/, 0, &aFileRef );
		::CFRelease( urlRef );
		if( err ) {
			throw IoExceptionFailedLoad();
		}
	}
	else {
		// data source provides neither a path nor a URL; nothing to open
		throw IoExceptionFailedLoad();
	}

	// ownership: AudioFileClose runs when the last reference goes away
	mFileRef = shared_ptr<OpaqueAudioFileID>( aFileRef, AudioFileClose );

	//load header info
	AudioStreamBasicDescription nativeFormatDescription;
	UInt32 size = sizeof( AudioStreamBasicDescription );
	err = AudioFileGetProperty( aFileRef, kAudioFilePropertyDataFormat, &size, &nativeFormatDescription );
	if( err ) {
		throw IoExceptionFailedLoad();
	}
	loadFromCaAudioStreamBasicDescription( this, &nativeFormatDescription );

	size = sizeof( uint64_t );
	err = AudioFileGetProperty( aFileRef, kAudioFilePropertyAudioDataPacketCount, &size, &mPacketCount );
	if( err ) {
		throw IoExceptionFailedLoad();
	}

	size = sizeof( uint64_t );
	err = AudioFileGetProperty( aFileRef, kAudioFilePropertyAudioDataByteCount, &size, &mByteCount );
	if( err ) {
		throw IoExceptionFailedLoad();
	}

	size = sizeof( uint32_t );
	err = AudioFileGetProperty( aFileRef, kAudioFilePropertyMaximumPacketSize, &size, &mMaxPacketSize );
	if( err ) {
		throw IoExceptionFailedLoad();
	}

	size = sizeof( double );
	err = AudioFileGetProperty( aFileRef, kAudioFilePropertyEstimatedDuration, &size, &mDuration );
	if( err ) {
		throw IoExceptionFailedLoad();
	}
}
void ConstructOutputFormatFromArgs ( CFURLRef inputFileURL, OSType fileType, OSType format, Float64 sampleRate, CAStreamBasicDescription &inputFormat, UInt32 bitDepth, CAStreamBasicDescription &outputFormat) { AudioFileID infile; OSStatus err = AudioFileOpenURL(inputFileURL, kAudioFileReadPermission, 0, &infile); XThrowIfError (err, "AudioFileOpen"); // get the input file format GetFormatFromInputFile (infile, inputFormat); if (inputFormat.mFormatID != kAudioFormatLinearPCM && sampleRate > 0) { printf ("Can only specify sample rate with linear pcm input file\n"); UsageString(1); } // set up the output file format if (!format) { if (sampleRate > 0) { outputFormat = inputFormat; outputFormat.mSampleRate = sampleRate; } else { if (inputFormat.mFormatID != kAudioFormatLinearPCM) format = kAudioFormatLinearPCM; else format = kAudioFormatAppleIMA4; } } if (format) { if (format == kAudioFormatLinearPCM) { outputFormat.mFormatID = format; outputFormat.mSampleRate = inputFormat.mSampleRate; outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame; outputFormat.mBitsPerChannel = (bitDepth) ? bitDepth : 16; outputFormat.mBytesPerPacket = inputFormat.mChannelsPerFrame * (outputFormat.mBitsPerChannel / 8); outputFormat.mFramesPerPacket = 1; outputFormat.mBytesPerFrame = outputFormat.mBytesPerPacket; if (fileType == kAudioFileWAVEType) outputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; else outputFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; } else { // need to set at least these fields for kAudioFormatProperty_FormatInfo outputFormat.mFormatID = format; outputFormat.mSampleRate = inputFormat.mSampleRate; outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame; // use AudioFormat API to fill out the rest. 
UInt32 size = sizeof(outputFormat); err = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat); XThrowIfError (err, "AudioFormatGetProperty kAudioFormatProperty_FormatInfo"); } } AudioFileClose (infile); }
void playFile(const char* filePath) { CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL, (UInt8*) filePath, strlen (filePath), false); OSStatus result = AudioFileOpenURL(audioFileURL, fsRdPerm, 0, &aqData.mAudioFile); CFRelease (audioFileURL); UInt32 dataFormatSize = sizeof (aqData.mDataFormat); AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyDataFormat, &dataFormatSize, &aqData.mDataFormat); AudioQueueNewOutput(&aqData.mDataFormat, HandleOutputBuffer, &aqData, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &aqData.mQueue); UInt32 maxPacketSize; UInt32 propertySize = sizeof (maxPacketSize); AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &propertySize, &maxPacketSize); DeriveBufferSize(&aqData.mDataFormat, maxPacketSize, 0.5, &aqData.bufferByteSize, &aqData.mNumPacketsToRead); bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 || aqData.mDataFormat.mFramesPerPacket == 0); if (isFormatVBR) { // LOG("%s\n","VBR"); aqData.mPacketDescs = (AudioStreamPacketDescription*) malloc (aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription)); } else { aqData.mPacketDescs = NULL; } UInt32 cookieSize = sizeof (UInt32); bool couldNotGetProperty = AudioFileGetPropertyInfo (aqData.mAudioFile, kAudioFilePropertyMagicCookieData, &cookieSize, NULL); if (!couldNotGetProperty && cookieSize) { char* magicCookie = (char *) malloc (cookieSize); AudioFileGetProperty (aqData.mAudioFile, kAudioFilePropertyMagicCookieData, &cookieSize, magicCookie); AudioQueueSetProperty (aqData.mQueue, kAudioQueueProperty_MagicCookie, magicCookie, cookieSize); free (magicCookie); } aqData.mCurrentPacket = 0; aqData.mIsRunning = true; //LOG("%d\n", aqData.mNumPacketsToRead); for (int i = 0; i < kNumberBuffers; ++i) { AudioQueueAllocateBuffer (aqData.mQueue, aqData.bufferByteSize, &aqData.mBuffers[i]); HandleOutputBuffer (&aqData, aqData.mQueue, aqData.mBuffers[i]); } Float32 gain = 1.0; // Optionally, allow user to override gain 
setting here AudioQueueSetParameter (aqData.mQueue, kAudioQueueParam_Volume, gain); //LOG("%s\n","Starting play"); // IMPORTANT NOTE : This value must be set // Before the call to HandleOutputBuffer //a qData.mIsRunning = true; AudioQueueStart (aqData.mQueue, NULL); }
/*
 * Entry point: plays the audio file named on the command line.
 * Sequence: create state -> open file -> query format and max packet size ->
 * create output queue -> allocate NUM_BUFFERS buffers (with packet
 * descriptions) -> prime via AudioQueueOutput -> start -> run loop until the
 * IsRunning listener stops it.
 * Returns 0 on success, 1 on any error (cleanup via the `error:` label).
 * NOTE(review): compiled as C -- the gotos jump forward over declarations,
 * which C++ would reject.
 */
int main (int argc, const char *argv[]) {
    OSStatus result;
    State *state = NULL;

    if (argc != 2) {
        printf("Usage: play <file>\n");
        goto error;
    }

    state = StateCreate();

    //
    // open the audio file
    //
    CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation( kCFAllocatorDefault, (const UInt8*)argv[1], strlen(argv[1]), false );
    if (!fileURL) {
        Error("Invalid filename");
        goto error;
    }

    result = AudioFileOpenURL( fileURL, kAudioFileReadPermission, 0, &state->audioFile );
    CFRelease(fileURL);
    if (result) {
        Error("Invalid audio file");
        goto error;
    }

    //
    // determine properties of stream including maximum packet size
    //
    UInt32 propertyDataSize;

    AudioStreamBasicDescription streamDescription;
    propertyDataSize = sizeof(streamDescription);
    result = AudioFileGetProperty( state->audioFile, kAudioFilePropertyDataFormat, &propertyDataSize, &streamDescription );
    if (result) {
        Error(NULL);
        goto error;
    }

    /* NOTE(review): kAudioFilePropertyMaximumPacketSize may scan the file;
     * kAudioFilePropertyPacketSizeUpperBound is the cheaper alternative. */
    UInt32 maximumPacketSize;
    propertyDataSize = sizeof(maximumPacketSize);
    result = AudioFileGetProperty( state->audioFile, kAudioFilePropertyMaximumPacketSize, &propertyDataSize, &maximumPacketSize );
    if (result) {
        Error(NULL);
        goto error;
    }

    //
    // initialize the audio queue and allocate buffers
    //
    result = AudioQueueNewOutput( &streamDescription, AudioQueueOutput, state, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode, 0, &state->audioQueue );
    if (result) {
        Error("Failed to initialize audio queue");
        goto error;
    }

    state->audioBufferSize = maximumPacketSize * PACKETS_PER_BUFFER;

    AudioQueueBufferRef audioQueueBuffers[NUM_BUFFERS];
    for (int i = 0; i < NUM_BUFFERS; ++i) {
        AudioQueueAllocateBufferWithPacketDescriptions( state->audioQueue, state->audioBufferSize, PACKETS_PER_BUFFER, &audioQueueBuffers[i] );
        if (result) {
            Error("Failed to initialize audio queue buffer");
            goto error;
        }
    }

    /* NOTE(review): result of adding the listener is assigned but never
     * checked before being overwritten below. */
    result = AudioQueueAddPropertyListener( state->audioQueue, kAudioQueueProperty_IsRunning, AudioQueuePropertyListener, NULL );

    //
    // prime and start the audio queue
    //
    AudioQueueOutput( state, state->audioQueue, audioQueueBuffers[0] );
    result = AudioQueueStart( state->audioQueue, NULL );
    if (result) {
        Error("Failed to start audio queue");
        goto error;
    }

    //
    // start the run loop that will dispatch audio queue callbacks
    //
    CFRunLoopRun();

    StateDestroy(state);
    return 0;

error:
    if (state) {
        StateDestroy(state);
    }
    return 1;
}
void DiskIn::initWithAudioFile(const char* audioFilePath, const bool loopFlag, const double startTime, const UGen::DoneAction doneAction) throw() { Text path; // this needs to be here so it doesn't get garbage collected too early if(audioFilePath[0] != '/') { path = NSUtilities::pathInDirectory(NSUtilities::Bundle, audioFilePath); audioFilePath = path.getArray(); } OSStatus result; UInt32 dataSize; CFURLRef audioFileURL; audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8*)audioFilePath, strlen(audioFilePath), false); AudioFileID audioFile = 0; result = AudioFileOpenURL (audioFileURL, kAudioFileReadPermission, 0, &audioFile); CFRelease(audioFileURL); if (result != noErr) { printf("DiskIn: error: Could not open file: %s err=%d\n", audioFilePath, (int)result); audioFile = 0; } if(audioFile) { AudioStreamBasicDescription format; dataSize = sizeof(format); result = AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &dataSize, &format); if (result != noErr) { printf("DiskIn: error: Could not get data format: %s err=%d\n", audioFilePath, (int)result); AudioFileClose(audioFile); audioFile = 0; } else if(format.mFormatID != kAudioFormatLinearPCM) { printf("DiskIn: error: Only PCM formats supported\\n"); AudioFileClose(audioFile); audioFile = 0; } else { if(format.mSampleRate != UGen::getSampleRate()) { printf("DiskIn: warning: Sample rate mismatch - resampling is not yet implemented\\n"); } if((format.mFormatFlags & kAudioFormatFlagIsFloat) != 0) { initInternal(format.mChannelsPerFrame); if((format.mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) { generateFromProxyOwner(new DiskInUGenInternalFloatBigEndian(audioFile, format, loopFlag, startTime, doneAction)); return; } else { generateFromProxyOwner(new DiskInUGenInternalFloatLittleEndian(audioFile, format, loopFlag, startTime, doneAction)); return; } } else if((format.mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) { // aif and other big edian? 
if(format.mBitsPerChannel == 16) { initInternal(format.mChannelsPerFrame); generateFromProxyOwner(new DiskInUGenInternalAiff16(audioFile, format, loopFlag, startTime, doneAction)); return; } else if(format.mBitsPerChannel == 32) { initInternal(format.mChannelsPerFrame); generateFromProxyOwner(new DiskInUGenInternalAiff32(audioFile, format, loopFlag, startTime, doneAction)); return; } else if(format.mBitsPerChannel == 24) { initInternal(format.mChannelsPerFrame); generateFromProxyOwner(new DiskInUGenInternalAiff24(audioFile, format, loopFlag, startTime, doneAction)); return; } else { printf("DiskIn: error: Sound file format not yet supported."); AudioFileClose(audioFile); audioFile = 0; } } else { // wav and other little edian? if(format.mBitsPerChannel == 16) { initInternal(format.mChannelsPerFrame); generateFromProxyOwner(new DiskInUGenInternalWav16(audioFile, format, loopFlag, startTime, doneAction)); return; } else if(format.mBitsPerChannel == 32) { initInternal(format.mChannelsPerFrame); generateFromProxyOwner(new DiskInUGenInternalWav32(audioFile, format, loopFlag, startTime, doneAction)); return; } else if(format.mBitsPerChannel == 24) { initInternal(format.mChannelsPerFrame); generateFromProxyOwner(new DiskInUGenInternalWav24(audioFile, format, loopFlag, startTime, doneAction)); return; } else { printf("DiskIn: error: Sound file format not yet supported."); AudioFileClose(audioFile); audioFile = 0; } } } } }
/*
 * Load an entire audio file into a malloc'd buffer suitable for
 * alBufferData().
 *
 * inFileURL     - URL of a little-endian (native) linear-PCM file,
 *                 mono or stereo, 8- or 16-bit.
 * outDataSize   - receives the byte count of the returned buffer.
 * outDataFormat - receives the matching AL_FORMAT_* constant.
 * outSampleRate - receives the file's sample rate.
 *
 * Returns the malloc'd sample data (caller frees), or NULL on any
 * failure.  NOTE(review): fileDataSize (UInt64) is truncated to UInt32,
 * so files >4GB are not handled — unchanged from the original.
 */
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei* outSampleRate)
{
    OSStatus err = noErr;
    UInt64 fileDataSize = 0;
    AudioStreamBasicDescription theFileFormat;
    UInt32 thePropertySize = sizeof(theFileFormat);
    AudioFileID afid = 0;
    void* theData = NULL;
    UInt32 dataSize = 0;

    /* Open the file with the AudioFile API */
    err = AudioFileOpenURL(inFileURL, kAudioFileReadPermission, 0, &afid);
    if (err) {
        printf("MyGetOpenALAudioData: AudioFileOpenURL FAILED, Error = %ld\n", (long)err);
        goto Exit;
    }

    /* Get the audio data format */
    err = AudioFileGetProperty(afid, kAudioFilePropertyDataFormat, &thePropertySize, &theFileFormat);
    if (err) {
        printf("MyGetOpenALAudioData: AudioFileGetProperty(kAudioFileProperty_DataFormat) FAILED, Error = %ld\n", (long)err);
        goto Exit;
    }

    /* OpenAL buffer formats only cover mono and stereo PCM */
    if (theFileFormat.mChannelsPerFrame > 2) {
        printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n");
        goto Exit;
    }

    if ((theFileFormat.mFormatID != kAudioFormatLinearPCM) || (!TestAudioFormatNativeEndian(theFileFormat))) {
        printf("MyGetOpenALAudioData - Unsupported Format, must be little-endian PCM\n");
        goto Exit;
    }

    if ((theFileFormat.mBitsPerChannel != 8) && (theFileFormat.mBitsPerChannel != 16)) {
        printf("MyGetOpenALAudioData - Unsupported Format, must be 8 or 16 bit PCM\n");
        goto Exit;
    }

    thePropertySize = sizeof(fileDataSize);
    err = AudioFileGetProperty(afid, kAudioFilePropertyAudioDataByteCount, &thePropertySize, &fileDataSize);
    if (err) {
        printf("MyGetOpenALAudioData: AudioFileGetProperty(kAudioFilePropertyAudioDataByteCount) FAILED, Error = %ld\n", (long)err);
        goto Exit;
    }

    /* Read all the data into memory */
    dataSize = (UInt32)fileDataSize;
    theData = malloc(dataSize);
    if (theData) {
        /* BUG FIX: the result of AudioFileReadBytes was discarded, so the
           'if (err == noErr)' below tested a stale value and the error
           branch was unreachable. */
        err = AudioFileReadBytes(afid, false, 0, &dataSize, theData);
        if (err == noErr) {
            /* success */
            *outDataSize = (ALsizei)dataSize;
            /* BUG FIX: 8-bit files (accepted by the check above) were
               previously always reported as a 16-bit AL format. */
            if (theFileFormat.mBitsPerChannel == 8) {
                *outDataFormat = (theFileFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO8 : AL_FORMAT_MONO8;
            } else {
                *outDataFormat = (theFileFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
            }
            *outSampleRate = (ALsizei)theFileFormat.mSampleRate;
        } else {
            /* failure */
            free(theData);
            theData = NULL; /* make sure to return NULL */
            printf("MyGetOpenALAudioData: AudioFileReadBytes FAILED, Error = %ld\n", (long)err);
            goto Exit;
        }
    }

Exit:
    /* Dispose of the AudioFileID, it is no longer needed */
    if (afid) AudioFileClose(afid);
    return theData;
}
bool AudioQueueStreamOut::Open(const char *FileName) { delete [] mInfo.mPacketDescs; mInfo.mPacketDescs = NULL; m_totalFrames = 0; mInfo.m_SeekToPacket = -1; try { CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)FileName, strlen(FileName), false); if (!sndFile) return false; OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &mInfo.mAudioFile); CFRelease (sndFile); UInt32 size = sizeof(mInfo.mDataFormat); XThrowIfError(AudioFileGetProperty(mInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &mInfo.mDataFormat), "couldn't get file's data format"); printf ("File format: "); mInfo.mDataFormat.Print(); XThrowIfError(AudioQueueNewOutput(&mInfo.mDataFormat, AudioQueueStreamOut::AQBufferCallback, this, NULL, kCFRunLoopCommonModes, 0, &mInfo.mQueue), "AudioQueueNew failed"); UInt32 bufferByteSize; // we need to calculate how many packets we read at a time, and how big a buffer we need // we base this on the size of the packets in the file and an approximate duration for each buffer { bool isFormatVBR = (mInfo.mDataFormat.mBytesPerPacket == 0 || mInfo.mDataFormat.mFramesPerPacket == 0); // first check to see what the max size of a packet is - if it is bigger // than our allocation default size, that needs to become larger UInt32 maxPacketSize; size = sizeof(maxPacketSize); XThrowIfError(AudioFileGetProperty(mInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size"); // adjust buffer size to represent about a half second of audio based on this format CalculateBytesForTime (mInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &mInfo.mNumPacketsToRead); if (isFormatVBR) mInfo.mPacketDescs = new AudioStreamPacketDescription [mInfo.mNumPacketsToRead]; else mInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM) printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", 
(int)bufferByteSize, (int)mInfo.mNumPacketsToRead); } // (2) If the file has a cookie, we should get it and set it on the AQ size = sizeof(UInt32); result = AudioFileGetPropertyInfo (mInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL); if (!result && size) { char* cookie = new char [size]; XThrowIfError (AudioFileGetProperty (mInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file"); XThrowIfError (AudioQueueSetProperty(mInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue"); delete [] cookie; } // prime the queue with some data before starting mInfo.mDone = false; mInfo.mCurrentPacket = 0; for (UInt32 i = 0; i < sizeof(mInfo.mBuffers)/sizeof(mInfo.mBuffers[0]); ++i) { XThrowIfError(AudioQueueAllocateBuffer(mInfo.mQueue, bufferByteSize, &mInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed"); AQBufferCallback (this, mInfo.mQueue, mInfo.mBuffers[i]); if (mInfo.mDone) break; } return IMUSIKStreamOutDefault::Create(NULL); } catch (CAXException e) { char buf[256]; fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf)); } return false; }