bool QueueAudioData::init() { OSStatus status = 0; // // Setup the audio device. // AudioStreamBasicDescription deviceFormat; deviceFormat.mSampleRate = 44100; deviceFormat.mFormatID = kAudioFormatLinearPCM; deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat; deviceFormat.mBytesPerPacket = 8; deviceFormat.mFramesPerPacket = 1; deviceFormat.mBytesPerFrame = 8; deviceFormat.mChannelsPerFrame = 2; deviceFormat.mBitsPerChannel = 32; deviceFormat.mReserved = 0; // // Create a new output AudioQueue // for the device. // status = AudioQueueNewOutput(&deviceFormat, // Format processAudio, // Callback this, // User data, passed to the callback CFRunLoopGetMain(), // RunLoop 0, // kCFRunLoopDefaultMode, // RunLoop mode 0, // Flags ; must be zero (per documentation)... &audioQueue); // Output Float32 volume = 1.0; AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, volume); // // Allocate buffers for the AudioQueue, // and pre-fill them. // static const int size = FRAME_SIZE * sizeof(float) * 2; for (int i = 0; i < NUMBER_OF_BUFFERS; ++i) { AudioQueueBufferRef buffer = 0; AudioQueueAllocateBuffer(audioQueue, size, &buffer); int n = buffer->mAudioDataBytesCapacity; memset(buffer->mAudioData, 0, n); buffer->mAudioDataByteSize = n; AudioQueueEnqueueBuffer(audioQueue, buffer, 0, 0); } initialized = true; return true; }
static void AQBufferCallback( void *in, AudioQueueRef inQ, AudioQueueBufferRef outQB) { AQCallbackStruct * inData; short *coreAudioBuffer; inData = (AQCallbackStruct *)in; coreAudioBuffer = (short*) outQB->mAudioData; if (inData->frameCount > 0) { AudioQueueSetParameter(inQ, kAudioQueueParam_Volume, __audioVolume); sound_mix(coreAudioBuffer, (inData->frameCount * 4) / 2); outQB->mAudioDataByteSize = inData->frameCount * 4; //(inData->frameCount * 4 < (sndOutLen) ? inData->frameCount * 4 : (sndOutLen)); AudioQueueEnqueueBuffer(inQ, outQB, 0, NULL); } }
static void AQBufferCallback( void *in, AudioQueueRef inQ, AudioQueueBufferRef outQB) { AQCallbackStruct * inData; short *coreAudioBuffer; inData = (AQCallbackStruct *)in; coreAudioBuffer = (short*) outQB->mAudioData; if (inData->frameCount > 0) { AudioQueueSetParameter(inQ, kAudioQueueParam_Volume, __audioVolume); sound_callback(NULL, (byte*)coreAudioBuffer, inData->frameCount * 4); outQB->mAudioDataByteSize = 4*inData->frameCount; AudioQueueEnqueueBuffer(inQ, outQB, 0, NULL); } }
// Sets the queue playback rate, clamped to [0.5, 2.0], enabling the
// time/pitch processor whenever the requested rate is not exactly 1.0.
void Audio_Queue::setPlayRate(float playRate)
{
    // No queue yet — nothing to configure.
    if (!m_outAQ) {
        return;
    }

    // Computed before clamping; clamping never turns a non-1.0 request
    // into exactly 1.0, so the result is unchanged.
    UInt32 enableTimePitchConversion = (playRate != 1.0);

    // Clamp the requested rate to [0.5, 2.0].
    if (playRate < 0.5) {
        playRate = 0.5;
    } else if (playRate > 2.0) {
        playRate = 2.0;
    }

    AudioQueueSetProperty(m_outAQ,
                          kAudioQueueProperty_EnableTimePitch,
                          &enableTimePitchConversion,
                          sizeof(enableTimePitchConversion));
    AudioQueueSetParameter(m_outAQ, kAudioQueueParam_PlayRate, playRate);
}
// AudioQueue output callback for the SNES emulator sound path.
// Fills `outQB` with up to SI_SoundBufferSizeBytes of mixed emulator audio
// (or silence while paused/stopped) and always re-enqueues the buffer.
static void AQBufferCallback( void *userdata, AudioQueueRef outQ, AudioQueueBufferRef outQB)
{
    // Always report a full buffer, even when we pad a shortfall below.
    outQB->mAudioDataByteSize = SI_SoundBufferSizeBytes;
    // Re-apply the user volume every cycle so changes take effect promptly.
    AudioQueueSetParameter(outQ, kAudioQueueParam_Volume, SI_AudioVolume);
    if(SI_EmulationPaused || !SI_EmulationRun || !SI_SoundIsInit) {
        // Emulation not producing audio: output silence and flag the hold.
        SI_AudioIsOnHold = 1;
        memset(outQB->mAudioData, 0, SI_SoundBufferSizeBytes);
    } else {
        SI_AudioIsOnHold = 0;
        // `available` is a byte count (sample count * 2 — presumably 16-bit
        // samples; TODO confirm against S9xGetSampleCount's units).
        int available = S9xGetSampleCount()*2;
        if(available > SI_SoundBufferSizeBytes)
            available = SI_SoundBufferSizeBytes;
        // S9xMixSamples takes a sample count, hence /2 to convert back.
        S9xMixSamples((unsigned char*)outQB->mAudioData, available/2);
        if(available < SI_SoundBufferSizeBytes) {
            // Underrun: fewer bytes were ready than the buffer holds.
            if(available == 0) {
                // do nothing here... we didn't copy anything... scared that if i write something to the output buffer, we'll get chirps and stuff
            } else {
                //printf("Fixing\n");
                // Pad the tail by repeating the last byte written.
                // sounds wiggly
                memset(((unsigned char*)outQB->mAudioData)+available, ((unsigned char*)outQB->mAudioData)[available-1], SI_SoundBufferSizeBytes-available);
                // sounds a little skippedly
                //memset(((unsigned char*)outQB->mAudioData)+available, *(int*)(((unsigned char*)outQB->mAudioData)+(available-3)), SI_SoundBufferSizeBytes-available);
            }
        }
    }
    AudioQueueEnqueueBuffer(outQ, outQB, 0, NULL);
}
static void AQBufferCallback( void *userdata, AudioQueueRef outQ, AudioQueueBufferRef outQB) { unsigned char *coreAudioBuffer; coreAudioBuffer = (unsigned char*) outQB->mAudioData; outQB->mAudioDataByteSize = AUDIO_BUFFER_SIZE; AudioQueueSetParameter(outQ, kAudioQueueParam_Volume, __audioVolume); //fprintf(stderr, "sound_lastlen %d\n", sound_lastlen); if(__emulation_paused) { memset(coreAudioBuffer, 0, AUDIO_BUFFER_SIZE); } else { S9xMixSamplesO((short*)coreAudioBuffer, (AUDIO_BUFFER_SIZE) / 2, 0); } AudioQueueEnqueueBuffer(outQ, outQB, 0, NULL); }
void Audio_Queue::setPlayRate(float playRate) { Stream_Configuration *configuration = Stream_Configuration::configuration(); if (!configuration->enableTimeAndPitchConversion) { #if defined(DEBUG) || (TARGET_IPHONE_SIMULATOR) printf("*** FreeStreamer notification: Trying to set play rate for audio queue but enableTimeAndPitchConversion is disabled from configuration. Play rate settign will not work.\n"); #endif return; } if (!m_outAQ) { return; } if (playRate < 0.5) { playRate = 0.5; } if (playRate > 2.0) { playRate = 2.0; } AudioQueueSetParameter(m_outAQ, kAudioQueueParam_PlayRate, playRate); }
// Command-line audio file player (AQPlay-style sample).
//   usage: [-v volume] [-t seconds] [-r rate] [-q rQuality] [-d] file
// Opens the file with AudioFile, plays it through an output AudioQueue on
// the current run loop, and exits when playback ends or the optional
// duration elapses. Errors inside the try block are reported and swallowed.
int main (int argc, const char * argv[])
{
#if TARGET_OS_WIN32
    InitializeQTML(0L);
#endif
    const char *fpath = NULL;
    Float32 volume = 1;
    Float32 duration = -1;
    Float32 currentTime = 0.0;
    Float32 rate = 0;
    int rQuality = 0;
    bool doPrint = false;

    // Parse command-line arguments.
    for (int i = 1; i < argc; ++i) {
        const char *arg = argv[i];
        if (arg[0] != '-') {
            // Bare argument: the file to play (only one allowed).
            if (fpath != NULL) {
                fprintf(stderr, "may only specify one file to play\n");
                usage();
            }
            fpath = arg;
        } else {
            arg += 1;
            // Each option accepts a short (-v) and a long (--volume) form.
            if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%f", &volume);
            } else if (arg[0] == 't' || !strcmp(arg, "-time")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%f", &duration);
            } else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%f", &rate);
            } else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
                if (++i == argc)
                    MissingArgument();
                arg = argv[i];
                sscanf(arg, "%d", &rQuality);
            } else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
                usage();
            } else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
                doPrint = true;
            } else {
                // arg - 1 restores the leading '-' for the message.
                fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
                usage();
            }
        }
    }

    if (fpath == NULL)
        usage();

    if (doPrint)
        printf ("Playing file: %s\n", fpath);

    try {
        AQTestInfo myInfo;

        CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
        if (!sndFile)
            XThrowIfError (!sndFile, "can't parse file path");

        OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
        CFRelease (sndFile);

        XThrowIfError(result, "AudioFileOpen failed");

        // Fetch the file's format list (layered formats possible, e.g. AAC+).
        UInt32 size;
        XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile,
                                kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
        UInt32 numFormats = size / sizeof(AudioFormatListItem);
        AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

        XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile,
                                kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
        numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
        if (numFormats == 1) {
            // this is the common case
            myInfo.mDataFormat = formatList[0].mASBD;

            // see if there is a channel layout (multichannel file)
            result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
            if (result == noErr && myInfo.mChannelLayoutSize > 0) {
                myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
                XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
            }
        } else {
            if (doPrint) {
                printf ("File has a %d layered data format:\n", (int)numFormats);
                for (unsigned int i = 0; i < numFormats; ++i)
                    CAStreamBasicDescription(formatList[i].mASBD).Print();
            }
            // now we should look to see which decoders we have on the system
            XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
            UInt32 numDecoders = size / sizeof(OSType);
            OSType *decoderIDs = new OSType [ numDecoders ];
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");
            // Pick the first layered format a system decoder can handle.
            unsigned int i = 0;
            for (; i < numFormats; ++i) {
                OSType decoderID = formatList[i].mASBD.mFormatID;
                bool found = false;
                for (unsigned int j = 0; j < numDecoders; ++j) {
                    if (decoderID == decoderIDs[j]) {
                        found = true;
                        break;
                    }
                }
                if (found)
                    break;
            }
            delete [] decoderIDs;

            if (i >= numFormats) {
                fprintf (stderr, "Cannot play any of the formats in this file\n");
                throw kAudioFileUnsupportedDataFormatError;
            }
            myInfo.mDataFormat = formatList[i].mASBD;
            // Synthesize a minimal channel layout from the chosen format.
            myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
            myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
            myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
            myInfo.mChannelLayout->mChannelBitmap = 0;
            myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
        }
        delete [] formatList;

        if (doPrint) {
            printf ("Playing format: ");
            myInfo.mDataFormat.Print();
        }

        XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed");

        UInt32 bufferByteSize;
        // we need to calculate how many packets we read at a time, and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);

            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");

            // adjust buffer size to represent about a half second of audio based on this format
            CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);

            if (isFormatVBR)
                myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
            else
                myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)

            if (doPrint)
                printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
        }

        // (2) If the file has a cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char [size];
            XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

        // set ACL if there is one
        if (myInfo.mChannelLayout)
            XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

        // prime the queue with some data before starting
        myInfo.mDone = false;
        myInfo.mCurrentPacket = 0;
        for (int i = 0; i < kNumberBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

            AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);

            if (myInfo.mDone)
                break;
        }

        // set the volume of the queue
        XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");

        XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");

#if !TARGET_OS_IPHONE
        // Optional rate-scaled playback via the time/pitch processor.
        if (rate > 0) {
            UInt32 propValue = 1;
            XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");

            propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
            XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");

            propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
            XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
            if (rate != 1) {
                XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
            }

            if (doPrint) {
                printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
            }
        }
#endif
        // lets start playing now - stop is called in the AQTestBufferCallback when there's
        // no more to read from the file
        XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

        // Pump the run loop in 0.25 s slices until the queue stops or the
        // optional -t duration elapses.
        do {
            CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
            currentTime += .25;
            if (duration > 0 && currentTime >= duration)
                break;
        } while (gIsRunning);

        // One extra second of run loop before disposing the queue.
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

        XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
        XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed");
    }
    catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "Unspecified exception\n");
    }

    return 0;
}
// Stores the requested volume and applies it to the queue scaled by the
// global master gain. Returns the AudioQueueSetParameter status.
OSStatus SetVolume(Float32 inVolume)
{
    mVolume = inVolume;
    const Float32 effectiveGain = mVolume * gMasterVolumeGain;
    return AudioQueueSetParameter(mQueue, kAudioQueueParam_Volume, effectiveGain);
}
// AudioQueue output callback for the SNES emulator sound path.
// Mixes whatever samples the emulator has ready into `outQB`, pads any
// shortfall, updates the SI_AudioOffset drift estimate, and re-enqueues
// the buffer.
static void AQBufferCallback( void *userdata, AudioQueueRef outQ, AudioQueueBufferRef outQB)
{
    // Always report a full buffer, even when we had to pad below.
    outQB->mAudioDataByteSize = SI_SoundBufferSizeBytes;
    // Re-apply the user volume every cycle so changes take effect promptly.
    AudioQueueSetParameter(outQ, kAudioQueueParam_Volume, SI_AudioVolume);
    if(SI_EmulationPaused || !SI_EmulationRun || !SI_SoundIsInit) {
        // Not emulating: output silence, reset drift tracking, flag hold.
        SI_AudioIsOnHold = 1;
        SI_AudioOffset = 0;
        memset(outQB->mAudioData, 0, SI_SoundBufferSizeBytes);
    } else {
        SI_AudioIsOnHold = 0;
        //static int i = 0;
        //printf("willLock %i\n", i);
        //printf("locked %i\n", i);
        //i++;
        int totalSamples = S9xGetSampleCount();
        int totalBytes = totalSamples;
        int samplesToUse = totalSamples;
        int bytesToUse = totalBytes;
        // In 16-bit mode each sample occupies two bytes.
        if(Settings.SixteenBitSound == true) {
            bytesToUse *= 2;
            totalBytes *= 2;
        }
        // Never mix more than one buffer's worth.
        if(bytesToUse > SI_SoundBufferSizeBytes) {
            bytesToUse = SI_SoundBufferSizeBytes;
            if(Settings.SixteenBitSound == true)
                samplesToUse = SI_SoundBufferSizeBytes/2;
            else
                samplesToUse = SI_SoundBufferSizeBytes;
        }
        // calculating the audio offset
        // (the -50 bias and the 8000/4000 clamp values look like empirical
        // tuning — TODO confirm their intended units/meaning)
        int samplesShouldBe = SI_SoundBufferSizeBytes;
        if(Settings.SixteenBitSound == true)
            samplesShouldBe = SI_SoundBufferSizeBytes/2;
        SI_AudioOffset -= (totalSamples-samplesShouldBe)*(1.0/Settings.SoundPlaybackRate)*1000-50;
        if(SI_AudioOffset > 8000)
            SI_AudioOffset = 4000;
        else if(SI_AudioOffset < -8000)
            SI_AudioOffset = -4000;
        //SI_AudioOffset = 900;
        // -900 is the magic number for this emulator running on iOS Simulator on my computer
        //printf("AudioOffset: %i\n", SI_AudioOffset);
        if(samplesToUse > 0)
            S9xMixSamples((unsigned char*)outQB->mAudioData, samplesToUse);
        if(bytesToUse < SI_SoundBufferSizeBytes) {
            // Underrun: fewer bytes were ready than the buffer holds.
            if(bytesToUse == 0) {
                // do nothing here... we didn't copy anything... scared that if i write something to the output buffer, we'll get chirps and stuff
                //printf("0 sampes available\n");
            } else {
                //printf("Fixing %i of %i\n", bytesToUse, SI_SoundBufferSizeBytes);
                // Pad the tail by repeating the last byte written.
                // sounds wiggly
                memset(((unsigned char*)outQB->mAudioData)+bytesToUse, ((unsigned char*)outQB->mAudioData)[bytesToUse-1], SI_SoundBufferSizeBytes-bytesToUse);
            }
        }
    }
    AudioQueueEnqueueBuffer(outQ, outQB, 0, NULL);
}
// Lazily creates and configures the playback (write) AudioQueue for this
// filter the first time it is needed: builds a mono signed-integer PCM
// format from the filter's rate/bits, creates the queue, applies the
// output gain, optionally binds the queue to a specific device UID, and
// finally allocates the write buffers via setupWrite().
static void aq_start_w(MSFilter * f) {
    AQData *d = (AQData *) f->data;
    if (d->write_started == FALSE) {
        OSStatus aqresult;

        // Mono, packed signed-integer PCM at d->rate / d->bits.
        d->writeAudioFormat.mSampleRate = d->rate;
        d->writeAudioFormat.mFormatID = kAudioFormatLinearPCM;
        d->writeAudioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        d->writeAudioFormat.mFramesPerPacket = 1;
        d->writeAudioFormat.mChannelsPerFrame = 1;
        d->writeAudioFormat.mBitsPerChannel = d->bits;
        d->writeAudioFormat.mBytesPerPacket = d->bits / 8;
        d->writeAudioFormat.mBytesPerFrame = d->bits / 8;

        show_format("data provided to output filter", &d->writeAudioFormat);
        show_format("output device", &d->devicewriteFormat);

        // The device format is forced to match the filter format, so the
        // AudioConverter path below stays compiled out (#if 0).
        memcpy(&d->devicewriteFormat, &d->writeAudioFormat,
               sizeof(d->writeAudioFormat));
        // Buffer sized for kSecondsPerBuffer seconds at the device format.
        d->writeBufferByteSize =
            kSecondsPerBuffer * d->devicewriteFormat.mSampleRate *
            (d->devicewriteFormat.mBitsPerChannel / 8) *
            d->devicewriteFormat.mChannelsPerFrame;

#if 0
        aqresult = AudioConverterNew(&d->writeAudioFormat,
                                     &d->devicewriteFormat,
                                     &d->writeAudioConverter);
        if (aqresult != noErr) {
            ms_error("d->writeAudioConverter = %d", aqresult);
            d->writeAudioConverter = NULL;
        }
#endif

        // create the playback audio queue object
        aqresult = AudioQueueNewOutput(&d->devicewriteFormat,
                                       writeCallback,
                                       d,
                                       NULL, /*CFRunLoopGetCurrent () */
                                       NULL, /*kCFRunLoopCommonModes */
                                       0,    // run loop flags
                                       &d->writeQueue);
        if (aqresult != noErr) {
            // Creation failure is only logged; callers proceed regardless.
            ms_error("AudioQueueNewOutput = %d", aqresult);
        }

        AudioQueueSetParameter(d->writeQueue,
                               kAudioQueueParam_Volume,
                               gain_volume_out);

        if (d->uidname!=NULL){
            char uidname[256];
            CFStringGetCString(d->uidname, uidname, 256, CFStringGetSystemEncoding());
            ms_message("AQ: using uidname:%s", uidname);
            // Bind the queue to the requested output device; the property
            // value is the CFStringRef itself, hence &d->uidname.
            aqresult = AudioQueueSetProperty(d->writeQueue,
                                             kAudioQueueProperty_CurrentDevice,
                                             &d->uidname, sizeof(CFStringRef));
            if (aqresult != noErr) {
                ms_error("AudioQueueSetProperty on kAudioQueueProperty_CurrentDevice %d", aqresult);
            }
        }

        setupWrite(f);
        d->curWriteBuffer = 0;
    }
}
/*
 * Creates an output AudioQueue for the given device and stream format.
 *
 * in_device         Device whose UID (if any) the queue is bound to.
 * in_callback_info  User data handed to darwin_playback_callback.
 * in_volume         Initial queue volume.
 * in_asbd           Stream format for the queue; must be non-NULL.
 * out_audio_queue   Receives the new queue on success.
 *
 * Returns noErr on success, or the first failing Core Audio status.
 *
 * Fixes vs. the original: the initial volume is now applied even when the
 * device has no UID (previously it was only set on the UID path), and a
 * volume failure is no longer mis-logged as a device-binding error.
 */
OSStatus
darwin_configure_output_audio_queue (
    cahal_device* in_device,
    cahal_playback_info* in_callback_info,
    FLOAT32 in_volume,
    AudioStreamBasicDescription* in_asbd,
    AudioQueueRef* out_audio_queue
)
{
  OSStatus result = noErr;

  if( NULL != in_asbd )
  {
    result =
      AudioQueueNewOutput (
        in_asbd,
        darwin_playback_callback,
        in_callback_info,
        NULL,
        kCFRunLoopCommonModes,
        0,
        out_audio_queue
        );

    if( noErr == result )
    {
      if( NULL != in_device->device_uid )
      {
        CFStringRef device_uid =
          CFStringCreateWithCString (
            NULL,
            in_device->device_uid,
            kCFStringEncodingASCII
            );

        CPC_LOG (
          CPC_LOG_LEVEL_TRACE,
          "Setting queue device to %s.",
          in_device->device_uid
          );

        result =
          AudioQueueSetProperty (
            *out_audio_queue,
            kAudioQueueProperty_CurrentDevice,
            &device_uid,
            sizeof( device_uid )
            );

        if( NULL != device_uid )
        {
          CFRelease( device_uid );
        }

        if( result )
        {
          /* Device-binding failures logged separately from volume errors. */
          CPC_ERROR (
            "Error setting current device (0x%x) to %s: 0x%x",
            kAudioQueueProperty_CurrentDevice,
            in_device->device_uid,
            result
            );

          CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
        }
      }

      /* Apply the initial volume whenever the queue exists and no prior
       * step failed — including the no-device-UID case.                  */
      if( noErr == result )
      {
        result =
          AudioQueueSetParameter (
            *out_audio_queue,
            kAudioQueueParam_Volume,
            in_volume
            );

        if( result )
        {
          CPC_ERROR( "Error setting queue volume: 0x%x.", result );

          CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
        }
      }
    }
    else
    {
      CPC_ERROR (
        "Error creating AudioQueue: 0x%x.",
        result
        );

      CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
    }
  }
  else
  {
    CPC_LOG_STRING  (
      CPC_LOG_LEVEL_ERROR,
      "Invalid basic stream description"
      );
  }

  return( result );
}
// Sets the track volume, clamped to [0, 1], and pushes it to the
// underlying audio queue.
void music_obj<audio_queue_driver>::volume(float v)
{
    const float clamped = glm::clamp(v, 0.0f, 1.0f);
    volume_ = clamped;
    AudioQueueSetParameter(queue_, kAudioQueueParam_Volume, clamped);
}
// Recomputes the effective queue volume from the track volume scaled by
// the engine's master and music volumes, and applies it to the queue.
void music_obj<audio_queue_driver>::on_volume_change()
{
    const float effective = volume_
        * sound_engine::instance().master_volume()
        * sound_engine::instance().music_volume();
    AudioQueueSetParameter(queue_, kAudioQueueParam_Volume, effective);
}
// Creates the output AudioQueue for the current device, registers property
// listeners, allocates the buffers, fills/primes them, and starts playback.
// Throws Exception on any unrecoverable Core Audio error; listener and
// volume failures are only logged.
void AudioOutputDeviceCoreAudio::CreateAndStartAudioQueue() throw(Exception) {
    OSStatus res = AudioQueueNewOutput (
        &aqPlayerState.mDataFormat,
        HandleOutputBuffer,
        &aqPlayerState,
        CFRunLoopGetCurrent(),
        kCFRunLoopCommonModes,
        0,
        &aqPlayerState.mQueue
    );

    if(res) {
        String s = String("AudioQueueNewOutput: Error ") + ToString(res);
        throw Exception(s);
    }

    // Bind the queue to the selected device by UID.
    CFStringRef devUID = CFStringCreateWithCString (
        NULL, CurrentDevice.GetUID().c_str(), kCFStringEncodingASCII
    );

    res = AudioQueueSetProperty (
        aqPlayerState.mQueue,
        kAudioQueueProperty_CurrentDevice,
        &devUID, sizeof(CFStringRef)
    );
    CFRelease(devUID);

    if(res) {
        String s = String("Failed to set audio device: ") + ToString(res);
        throw Exception(s);
    }

    for (int i = 0; i < uiBufferNumber; ++i) {
        res = AudioQueueAllocateBuffer (
            aqPlayerState.mQueue,
            aqPlayerState.bufferByteSize,
            &aqPlayerState.mBuffers[i]
        );

        if(res) {
            String s = String("AudioQueueAllocateBuffer: Error ");
            throw Exception(s + ToString(res));
        }
    }

    // Listener registration failures are non-fatal: only logged.
    res = AudioQueueAddPropertyListener (
        aqPlayerState.mQueue,
        kAudioQueueProperty_CurrentDevice,
        AudioQueueListener,
        NULL
    );
    if(res) std::cerr << "Failed to register device change listener: " << res << std::endl;

    res = AudioQueueAddPropertyListener (
        aqPlayerState.mQueue,
        kAudioQueueProperty_IsRunning,
        AudioQueueListener,
        NULL
    );
    if(res) std::cerr << "Failed to register running listener: " << res << std::endl;

    Float32 gain = 1.0;

    res = AudioQueueSetParameter (
        aqPlayerState.mQueue,
        kAudioQueueParam_Volume,
        gain
    );
    if(res) std::cerr << "AudioQueueSetParameter: Error " << res << std::endl;

    // The running flag is raised before filling/priming the buffers —
    // keep this ordering (presumably the render path checks it).
    atomic_set(&(aqPlayerState.mIsRunning), 1);
    FillBuffers();
    PrimeAudioQueue();

    res = AudioQueueStart(aqPlayerState.mQueue, NULL);
    if(res) {
        String s = String("AudioQueueStart: Error ") + ToString(res);
        throw Exception(s);
    }
}
// Opens the audio file at `filePath` and plays it through a new output
// AudioQueue on the current run loop. Sample-code style: most Core Audio
// return codes are not checked.
void playFile(const char* filePath) {
    CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL, (UInt8*) filePath, strlen (filePath), false);
    OSStatus result = AudioFileOpenURL(audioFileURL, fsRdPerm, 0, &aqData.mAudioFile);
    CFRelease (audioFileURL);

    // Read the file's data format so the queue matches it exactly.
    UInt32 dataFormatSize = sizeof (aqData.mDataFormat);
    AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyDataFormat, &dataFormatSize, &aqData.mDataFormat);

    AudioQueueNewOutput(&aqData.mDataFormat, HandleOutputBuffer, &aqData, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &aqData.mQueue);

    // Size the buffers from the file's largest packet for ~0.5 s of audio.
    UInt32 maxPacketSize;
    UInt32 propertySize = sizeof (maxPacketSize);
    AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &propertySize, &maxPacketSize);
    DeriveBufferSize(&aqData.mDataFormat, maxPacketSize, 0.5, &aqData.bufferByteSize, &aqData.mNumPacketsToRead);

    // VBR formats need per-packet descriptions; CBR (e.g. PCM) does not.
    bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 || aqData.mDataFormat.mFramesPerPacket == 0);
    if (isFormatVBR) {
        // LOG("%s\n","VBR");
        aqData.mPacketDescs = (AudioStreamPacketDescription*) malloc (aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription));
    } else {
        aqData.mPacketDescs = NULL;
    }

    // If the file carries a magic cookie (codec configuration data),
    // hand it to the queue before playback.
    UInt32 cookieSize = sizeof (UInt32);
    bool couldNotGetProperty = AudioFileGetPropertyInfo (aqData.mAudioFile, kAudioFilePropertyMagicCookieData, &cookieSize, NULL);
    if (!couldNotGetProperty && cookieSize) {
        char* magicCookie = (char *) malloc (cookieSize);
        AudioFileGetProperty (aqData.mAudioFile, kAudioFilePropertyMagicCookieData, &cookieSize, magicCookie);
        AudioQueueSetProperty (aqData.mQueue, kAudioQueueProperty_MagicCookie, magicCookie, cookieSize);
        free (magicCookie);
    }

    // Prime the queue: allocate each buffer and fill it via the callback.
    aqData.mCurrentPacket = 0;
    aqData.mIsRunning = true;
    //LOG("%d\n", aqData.mNumPacketsToRead);
    for (int i = 0; i < kNumberBuffers; ++i) {
        AudioQueueAllocateBuffer (aqData.mQueue, aqData.bufferByteSize, &aqData.mBuffers[i]);
        HandleOutputBuffer (&aqData, aqData.mQueue, aqData.mBuffers[i]);
    }

    Float32 gain = 1.0;
    // Optionally, allow user to override gain setting here
    AudioQueueSetParameter (aqData.mQueue, kAudioQueueParam_Volume, gain);

    //LOG("%s\n","Starting play");
    // IMPORTANT NOTE : This value must be set
    // Before the call to HandleOutputBuffer
    //a qData.mIsRunning = true;
    AudioQueueStart (aqData.mQueue, NULL);
}
// Applies the given volume to the output audio queue, if one exists.
void Audio_Queue::setVolume(float volume)
{
    if (m_outAQ) {
        AudioQueueSetParameter(m_outAQ, kAudioQueueParam_Volume, volume);
    }
}