void GbApuEmulator::initializeAudioPlaybackQueue() { Float32 gain = 1.0; // Create new output int error = AudioQueueNewOutput(&(gbAPUState->dataFormat), HandleOutputBuffer, gbAPUState, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &(gbAPUState->queue)); if (error) printf("AudioQueueNewOutput: %d\n", error); // Set buffer size gbAPUState->numPacketsToRead = 735 * 4; // 44.1kHz at 60 fps = 735 (times 4 to reduce overhead) gbAPUState->bufferByteSize = gbAPUState->numPacketsToRead * 2; // Packets times 16-bits per sample // Allocate those bufferes for (int i = 0; i < NUM_BUFFERS; ++i) { AudioQueueAllocateBuffer(gbAPUState->queue, gbAPUState->bufferByteSize, &(gbAPUState->buffers[i])); if (error) printf("AudioQueueAllocateBuffer: %d\n", error); } AudioQueueSetParameter(gbAPUState->queue, kAudioQueueParam_Volume, gain); }
void play() { int i; AudioStreamBasicDescription format; AudioQueueRef queue; AudioQueueBufferRef buffers[NUM_BUFFERS]; format.mSampleRate = SAMPLE_RATE; format.mFormatID = kAudioFormatLinearPCM; format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; format.mBitsPerChannel = 8*sizeof(SAMPLE_TYPE); format.mChannelsPerFrame = NUM_CHANNELS; format.mBytesPerFrame = sizeof(SAMPLE_TYPE)*NUM_CHANNELS; format.mFramesPerPacket = 1; format.mBytesPerPacket = format.mBytesPerFrame*format.mFramesPerPacket; format.mReserved = 0; AudioQueueNewOutput(&format, callback, NULL, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue); for (i = 0; i < NUM_BUFFERS; i++) { AudioQueueAllocateBuffer(queue, BUFFER_SIZE, &buffers[i]); buffers[i]->mAudioDataByteSize = BUFFER_SIZE; callback(NULL, queue, buffers[i]); } AudioQueueStart(queue, NULL); CFRunLoopRun(); }
void CL_SoundOutput_MacOSX::mixer_thread_starting() { audio_format.mSampleRate = frequency; audio_format.mFormatID = kAudioFormatLinearPCM; audio_format.mFormatFlags = kAudioFormatFlagsCanonical; audio_format.mBytesPerPacket = 2 * sizeof(short); audio_format.mFramesPerPacket = 1; audio_format.mBytesPerFrame = 2 * sizeof(short); audio_format.mChannelsPerFrame = 2; audio_format.mBitsPerChannel = sizeof(short) * 8; audio_format.mReserved = 0; OSStatus result = AudioQueueNewOutput(&audio_format, &CL_SoundOutput_MacOSX::static_audio_queue_callback, this, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode, 0, &audio_queue); if (result != 0) throw CL_Exception("AudioQueueNewOutput failed"); for (int i = 0; i < fragment_buffer_count; i++) { result = AudioQueueAllocateBuffer(audio_queue, fragment_size * sizeof(short) * 2, &audio_buffers[i]); if (result != 0) throw CL_Exception("AudioQueueAllocateBuffer failed"); audio_queue_callback(audio_queue, audio_buffers[i]); } result = AudioQueueStart(audio_queue, 0); if (result != 0) throw CL_Exception("AudioQueueStart failed"); }
// Streaming music object backed by a custom ifdstream (virtual file system).
// Opens the audio file through I/O callbacks, creates a playback queue
// matching the file's data format, sizes packet reads for CBR or VBR content,
// forwards the codec's magic cookie, then seeks and primes the queue.
music_obj<audio_queue_driver>::music_obj(const boost::shared_ptr<ifdstream>& ifd, bool loop, float gain, float start, float end)
: packet_index_(0)
, start_packet_index_(0)
, stop_packet_index_(0)
, volume_(gain)
, loop_(loop)
, is_paused_(false)
, ifd_(ifd)
{
    LOG("Got ifdstream from path..");

    // Open via read/write/size callbacks so data can come from the custom stream.
    OSStatus res = AudioFileOpenWithCallbacks(this, &music_obj::af_read_cb, &music_obj::af_write_cb,
                                              &music_obj::af_get_size_cb, &music_obj::af_set_size_cb,
                                              kAudioFileCAFType, &audio_file_);
    if(res)
    {
        throw sys_exception("audio_queue_driver: couldn't open audio file in liverpool fs. AudioFile returned "
                            + boost::lexical_cast<std::string>(res));
    }

    UInt32 size = sizeof(data_format_);
    AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);

    // NULL run loop: buffer callbacks arrive on the queue's internal thread.
    AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);
    AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);

    if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
    {
        // VBR (or unknown packetization): bound the read size by the largest
        // possible packet and allocate per-packet descriptions for enqueueing.
        size = sizeof(max_packet_size_);
        AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
        if (max_packet_size_ > BUFFER_SIZE_BYTES)
        {
            max_packet_size_ = BUFFER_SIZE_BYTES;
        }
        num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
        packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
    }
    else
    {
        // CBR: fixed-size packets need no descriptions.
        num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
        packet_descriptions_ = NULL;
    }

    // NOTE(review): the result of AudioFileGetPropertyInfo is unchecked; if it
    // fails, `size` keeps its previous (nonzero) value and a bogus cookie read
    // could follow — confirm this cannot fail for files this driver accepts.
    AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL);
    if (size > 0)
    {
        // Hand the codec's magic cookie over to the queue before playback.
        char* cookie = (char*)malloc(sizeof(char) * size);
        AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
        AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
        free(cookie);
    }

    calculate_seek(start, end);
    volume(volume_);
    prime();
}
// Builds an ALAC output queue from the magic cookie, registers the cookie on
// the queue, pre-fills num_buffers buffers via the callback, then primes and
// starts playback at full volume. Returns 0 on success, the first failing
// OSStatus otherwise.
int32_t setup_queue(
    ALACMagicCookie cookie,
    PlayerInfo *playerInfo,
    uint32_t buffer_size,
    uint32_t num_buffers,
    uint32_t num_packets
) {
    // Describe the ALAC stream; compressed fields stay zero because the real
    // decoding parameters travel in the magic cookie.
    AudioStreamBasicDescription inFormat = {0};
    inFormat.mSampleRate       = ntohl(cookie.sampleRate);
    inFormat.mFormatID         = kAudioFormatAppleLossless;
    inFormat.mFormatFlags      = 0;                          // ALAC uses no flags
    inFormat.mBytesPerPacket   = 0;                          // variable size (needs packet descriptions)
    inFormat.mFramesPerPacket  = ntohl(cookie.frameLength);
    inFormat.mBytesPerFrame    = 0;                          // compressed
    inFormat.mChannelsPerFrame = 2;                          // Stereo TODO: get from fmtp?
    inFormat.mBitsPerChannel   = 0;                          // compressed
    inFormat.mReserved         = 0;

    // NULL run loop/mode: callbacks run on the queue's own thread.
    OSStatus status = AudioQueueNewOutput(&inFormat, c_callback, playerInfo,
                                          NULL, NULL, 0, &playerInfo->queue);
    if (status) return status;

    // The queue needs the magic cookie too (tail of the fmtp line).
    status = AudioQueueSetProperty(playerInfo->queue, kAudioQueueProperty_MagicCookie,
                                   &cookie, sizeof(ALACMagicCookie));
    if (status) return status;

    // Create input buffers and enqueue each through the callback.
    for (uint32_t buf = 0; buf < num_buffers; buf++) {
        AudioQueueBufferRef buffer;
        status = AudioQueueAllocateBufferWithPacketDescriptions(playerInfo->queue, buffer_size,
                                                                num_packets, &buffer);
        if (status) return status;
        c_callback(playerInfo, playerInfo->queue, buffer);
    }

    // Full volume, prime, then start.
    status = AudioQueueSetParameter(playerInfo->queue, kAudioQueueParam_Volume, 1.0);
    if (status) return status;
    status = AudioQueuePrime(playerInfo->queue, 0, NULL);
    if (status) return status;
    status = AudioQueueStart(playerInfo->queue, NULL);
    if (status) return status;

    return 0;
}
// Creates the output AudioQueue from m_streamDesc, allocates its buffers, and
// subscribes to kAudioQueueProperty_IsRunning. On failure the error is stored
// in m_lastError and the delegate (if set) is notified; a buffer-allocation
// failure also disposes the queue so m_outAQ == 0 marks "not initialized".
void Audio_Queue::init()
{
    OSStatus err = noErr;

    // Tear down any previously created queue state before building a new one.
    cleanup();

    // create the audio queue; callbacks are delivered on the current run loop
    err = AudioQueueNewOutput(&m_streamDesc, audioQueueOutputCallback, this, CFRunLoopGetCurrent(), NULL, 0, &m_outAQ);
    if (err) {
        AQ_TRACE("%s: error in AudioQueueNewOutput\n", __PRETTY_FUNCTION__);

        m_lastError = err;

        if (m_delegate) {
            m_delegate->audioQueueInitializationFailed();
        }

        return;
    }

    Stream_Configuration *configuration = Stream_Configuration::configuration();

    // allocate audio queue buffers
    for (unsigned int i = 0; i < configuration->bufferCount; ++i) {
        err = AudioQueueAllocateBuffer(m_outAQ, configuration->bufferSize, &m_audioQueueBuffer[i]);
        if (err) {
            /* If allocating the buffers failed, everything else will fail, too.
             * Dispose the queue so that we can later on detect that this
             * queue in fact has not been initialized. */
            AQ_TRACE("%s: error in AudioQueueAllocateBuffer\n", __PRETTY_FUNCTION__);

            (void)AudioQueueDispose(m_outAQ, true);
            m_outAQ = 0;

            m_lastError = err;

            if (m_delegate) {
                m_delegate->audioQueueInitializationFailed();
            }

            return;
        }
    }

    // listen for kAudioQueueProperty_IsRunning so state changes are observed
    err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
    if (err) {
        AQ_TRACE("%s: error in AudioQueueAddPropertyListener\n", __PRETTY_FUNCTION__);
        m_lastError = err;
        return;
    }

    // Apply a non-default initial volume only when one was requested.
    if (m_initialOutputVolume != 1.0) {
        setVolume(m_initialOutputVolume);
    }
}
/* VLC audio output "open" callback: configures a 44.1kHz signed 16-bit
 * little-endian stereo AudioQueue on the main run loop, pre-fills
 * NUMBER_OF_BUFFERS buffers through the render callback, sets up
 * packet-based playback, and starts the queue. Always returns VLC_SUCCESS. */
static int Open ( vlc_object_t *p_this )
{
    audio_output_t *p_aout = (audio_output_t *)p_this;

    /* NOTE(review): the malloc result is not checked, and the OSStatus from
     * AudioQueueNewOutput is only logged — a failure here would lead to use
     * of a NULL sys/queue below. Confirm whether this is acceptable for the
     * target platform or should return an error. */
    struct aout_sys_t *p_sys = malloc(sizeof(aout_sys_t));
    p_aout->sys = p_sys;

    OSStatus status = 0;

    // Setup the audio device.
    AudioStreamBasicDescription deviceFormat;
    deviceFormat.mSampleRate = 44100;
    deviceFormat.mFormatID = kAudioFormatLinearPCM;
    deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger; // Signed integer, little endian
    deviceFormat.mBytesPerPacket = 4;
    deviceFormat.mFramesPerPacket = 1;
    deviceFormat.mBytesPerFrame = 4;
    deviceFormat.mChannelsPerFrame = 2;
    deviceFormat.mBitsPerChannel = 16;
    deviceFormat.mReserved = 0;

    // Create a new output AudioQueue for the device.
    status = AudioQueueNewOutput(&deviceFormat,         // Format
                                 AudioQueueCallback,    // Callback
                                 p_aout,                // User data, passed to the callback
                                 CFRunLoopGetMain(),    // RunLoop
                                 kCFRunLoopDefaultMode, // RunLoop mode
                                 0,                     // Flags ; must be zero (per documentation)...
                                 &(p_sys->audioQueue)); // Output

    // This will be used for boosting the audio without the need of a mixer
    // (floating-point conversion is expensive on ARM)
    // AudioQueueSetParameter(p_sys->audioQueue, kAudioQueueParam_Volume, 12.0); // Defaults to 1.0

    msg_Dbg(p_aout, "New AudioQueue output created (status = %i)", status);

    // Allocate buffers for the AudioQueue, and pre-fill them.
    for (int i = 0; i < NUMBER_OF_BUFFERS; ++i) {
        AudioQueueBufferRef buffer = NULL;
        status = AudioQueueAllocateBuffer(p_sys->audioQueue, FRAME_SIZE * 4, &buffer);
        AudioQueueCallback(NULL, p_sys->audioQueue, buffer);
    }

    /* Volume is entirely done in software. */
    aout_SoftVolumeInit( p_aout );

    p_aout->format.i_format = VLC_CODEC_S16L;
    p_aout->format.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT;
    p_aout->format.i_rate = 44100;

    aout_PacketInit(p_aout, &p_sys->packet, FRAME_SIZE);

    p_aout->pf_play = aout_PacketPlay;
    p_aout->pf_pause = aout_PacketPause;
    p_aout->pf_flush = aout_PacketFlush;

    msg_Dbg(p_aout, "Starting AudioQueue (status = %i)", status);
    status = AudioQueueStart(p_sys->audioQueue, NULL);

    return VLC_SUCCESS;
}
int app_OpenSound(int samples_per_sync, int sample_rate) { Float64 sampleRate = 44100.0; int i; LOGDEBUG("app_SoundOpen()"); app_MuteSound(); if(preferences.muted) { return 0; } soundInit = 0; in.mDataFormat.mSampleRate = sampleRate; in.mDataFormat.mFormatID = kAudioFormatLinearPCM; in.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; in.mDataFormat.mBytesPerPacket = 4; in.mDataFormat.mFramesPerPacket = 1; in.mDataFormat.mBytesPerFrame = 2; in.mDataFormat.mChannelsPerFrame = 2; in.mDataFormat.mBitsPerChannel = 16; /* Pre-buffer before we turn on audio */ UInt32 err; err = AudioQueueNewOutput(&in.mDataFormat, AQBufferCallback, &in, NULL, kCFRunLoopCommonModes, 0, &in.queue); if (err) { LOGDEBUG("AudioQueueNewOutput err %d\n", err); } in.frameCount = 512 * 1; //512; //(1024 * (16)) / 4; UInt32 bufferBytes = in.frameCount * in.mDataFormat.mBytesPerFrame; for (i=0; i<AUDIO_BUFFERS; i++) { err = AudioQueueAllocateBuffer(in.queue, bufferBytes, &in.mBuffers[i]); if (err) { LOGDEBUG("AudioQueueAllocateBuffer[%d] err %d\n",i, err); } /* "Prime" by calling the callback once per buffer */ AQBufferCallback (&in, in.queue, in.mBuffers[i]); } soundInit = 1; LOGDEBUG("app_QueueSample.AudioQueueStart"); err = AudioQueueStart(in.queue, NULL); return 0; }
// AudioFileStream property-listener callback. When the parser becomes ready
// to produce packets, creates the output AudioQueue from the discovered
// stream format, allocates the queue buffers, forwards the stream's magic
// cookie to the queue, and registers the IsRunning listener. Failures set
// myData->failed.
void MyPropertyListenerProc( void * inClientData,
                             AudioFileStreamID inAudioFileStream,
                             AudioFileStreamPropertyID inPropertyID,
                             UInt32 * ioFlags)
{
    // this is called by audio file stream when it finds property values
    MyData* myData = (MyData*)inClientData;
    OSStatus err = noErr;

    printf("found property '%c%c%c%c'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

    switch (inPropertyID) {
        case kAudioFileStreamProperty_ReadyToProducePackets :
        {
            // the file stream parser is now ready to produce audio packets.
            // get the stream format.
            AudioStreamBasicDescription asbd;
            UInt32 asbdSize = sizeof(asbd);
            err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &asbd);
            if (err) { PRINTERROR("get kAudioFileStreamProperty_DataFormat"); myData->failed = true; break; }

            // create the audio queue; NULL run loop means the queue's
            // callbacks are delivered on its internal thread
            err = AudioQueueNewOutput(&asbd, MyAudioQueueOutputCallback, myData, NULL, NULL, 0, &myData->audioQueue);
            if (err) { PRINTERROR("AudioQueueNewOutput"); myData->failed = true; break; }

            // allocate audio queue buffers
            // NOTE(review): a failure here only breaks out of this for loop;
            // execution then continues into the cookie handling below even
            // though myData->failed is set — confirm that is intended.
            for (unsigned int i = 0; i < kNumAQBufs; ++i) {
                err = AudioQueueAllocateBuffer(myData->audioQueue, kAQBufSize, &myData->audioQueueBuffer[i]);
                if (err) { PRINTERROR("AudioQueueAllocateBuffer"); myData->failed = true; break; }
            }

            // get the cookie size
            UInt32 cookieSize;
            Boolean writable;
            err = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, &writable);
            if (err) { PRINTERROR("info kAudioFileStreamProperty_MagicCookieData"); break; }
            printf("cookieSize %d\n", cookieSize);

            // get the cookie data
            void* cookieData = calloc(1, cookieSize);
            err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, cookieData);
            if (err) { PRINTERROR("get kAudioFileStreamProperty_MagicCookieData"); free(cookieData); break; }

            // set the cookie on the queue.
            err = AudioQueueSetProperty(myData->audioQueue, kAudioQueueProperty_MagicCookie, cookieData, cookieSize);
            free(cookieData);
            if (err) { PRINTERROR("set kAudioQueueProperty_MagicCookie"); break; }

            // listen for kAudioQueueProperty_IsRunning
            err = AudioQueueAddPropertyListener(myData->audioQueue, kAudioQueueProperty_IsRunning, MyAudioQueueIsRunningCallback, myData);
            if (err) { PRINTERROR("AudioQueueAddPropertyListener"); myData->failed = true; break; }

            break;
        }
    }
}
// Streaming music object loaded from a file path. Opens the file, creates a
// playback queue matching its data format, sizes packet reads for CBR or VBR
// content, forwards the codec's magic cookie, then seeks and primes the queue.
music_obj<audio_queue_driver>::music_obj(const std::string& file_path, bool loop, float gain, float start, float end)
: packet_index_(0)
, start_packet_index_(0)
, stop_packet_index_(0)
, volume_(gain)
, loop_(loop)
, is_paused_(false)
{
    CFURLRef file_url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)file_path.c_str(), file_path.size(), false);
    OSStatus res = AudioFileOpenURL(file_url, kAudioFileReadPermission, kAudioFileCAFType, &audio_file_);
    CFRelease(file_url);  // the URL is only needed for the open call

    if(res)
    {
        throw sys_exception("audio_queue_driver: couldn't open audio file at '" + file_path + "'");
    }

    UInt32 size = sizeof(data_format_);
    AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);

    // NULL run loop: buffer callbacks arrive on the queue's internal thread.
    AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);
    AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);

    if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
    {
        // VBR (or unknown packetization): bound the read size by the largest
        // possible packet and allocate per-packet descriptions for enqueueing.
        size = sizeof(max_packet_size_);
        AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
        if (max_packet_size_ > BUFFER_SIZE_BYTES)
        {
            max_packet_size_ = BUFFER_SIZE_BYTES;
        }
        num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
        packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
    }
    else
    {
        // CBR: fixed-size packets need no descriptions.
        num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
        packet_descriptions_ = NULL;
    }

    // NOTE(review): the result of AudioFileGetPropertyInfo is unchecked; if it
    // fails, `size` keeps its previous (nonzero) value and a bogus cookie read
    // could follow — confirm this cannot fail for supported files.
    AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL);
    if (size > 0)
    {
        // Hand the codec's magic cookie over to the queue before playback.
        char* cookie = (char*)malloc(sizeof(char) * size);
        AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
        AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
        free(cookie);
    }

    calculate_seek(start, end);
    volume(volume_);
    prime();
}
// Reacts to AudioFileStream property events. On ReadyToProducePackets the
// parsed stream format is used to create the output queue, allocate its
// buffers, forward codec cookies, and register the IsRunning listener.
// Errors are recorded in m_lastError.
void Audio_Queue::handlePropertyChange(AudioFileStreamID inAudioFileStream, AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    OSStatus err = noErr;

    AQ_TRACE("found property '%lu%lu%lu%lu'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

    switch (inPropertyID) {
        case kAudioFileStreamProperty_ReadyToProducePackets:
        {
            // the file stream parser is now ready to produce audio packets.
            // get the stream format.
            AudioStreamBasicDescription asbd;
            memset(&asbd, 0, sizeof(asbd));
            UInt32 asbdSize = sizeof(asbd);
            err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &asbd);
            if (err) {
                AQ_TRACE("get kAudioFileStreamProperty_DataFormat\n");
                m_lastError = err;
                break;
            }

            // create the audio queue; NULL run loop means callbacks are
            // delivered on the queue's internal thread
            err = AudioQueueNewOutput(&asbd, audioQueueOutputCallback, this, NULL, NULL, 0, &m_outAQ);
            if (err) {
                AQ_TRACE("AudioQueueNewOutput\n");
                m_lastError = err;
                break;
            }

            // allocate audio queue buffers
            // NOTE(review): an allocation failure only breaks this loop;
            // execution still falls through to the cookie/listener setup
            // below — confirm that is intended.
            for (unsigned int i = 0; i < AQ_BUFFERS; ++i) {
                err = AudioQueueAllocateBuffer(m_outAQ, AQ_BUFSIZ, &m_audioQueueBuffer[i]);
                if (err) {
                    AQ_TRACE("AudioQueueAllocateBuffer\n");
                    m_lastError = err;
                    break;
                }
            }

            setCookiesForStream(inAudioFileStream);

            // listen for kAudioQueueProperty_IsRunning
            err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
            if (err) {
                AQ_TRACE("error in AudioQueueAddPropertyListener");
                m_lastError = err;
                break;
            }

            break;
        }
    }
}
// rdpsnd AudioQueue backend: opens the playback queue with a fixed 44.1kHz
// signed 16-bit stereo PCM format and allocates the buffer pool.
// The `format` and `latency` parameters are currently unused.
static void rdpsnd_audio_open(rdpsndDevicePlugin* device, AUDIO_FORMAT* format, int latency)
{
    int rv;
    int i;

    rdpsndAudioQPlugin* aq_plugin_p = (rdpsndAudioQPlugin *) device;
    if (aq_plugin_p->is_open) {
        return;  // already open; nothing to do
    }

    aq_plugin_p->buf_index = 0;

    // setup AudioStreamBasicDescription
    aq_plugin_p->data_format.mSampleRate = 44100;
    aq_plugin_p->data_format.mFormatID = kAudioFormatLinearPCM;
    aq_plugin_p->data_format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    // until we know better, assume that one packet = one frame
    // one frame = bytes_per_sample x number_of_channels
    aq_plugin_p->data_format.mBytesPerPacket = 4;
    aq_plugin_p->data_format.mFramesPerPacket = 1;
    aq_plugin_p->data_format.mBytesPerFrame = 4;
    aq_plugin_p->data_format.mChannelsPerFrame = 2;
    aq_plugin_p->data_format.mBitsPerChannel = 16;

    rv = AudioQueueNewOutput(
        &aq_plugin_p->data_format, // audio stream basic desc
        aq_playback_cb,            // callback when more data is required
        aq_plugin_p,               // data to pass to callback
        CFRunLoopGetCurrent(),     // The current run loop, and the one on
                                   // which the audio queue playback callback
                                   // will be invoked
        kCFRunLoopCommonModes,     // run loop modes in which callbacks can
                                   // be invoked
        0,                         // flags - reserved
        &aq_plugin_p->aq_ref
    );
    if (rv != 0) {
        printf("rdpsnd_audio_open: AudioQueueNewOutput() failed with error %d\n", rv);
        /* NOTE(review): the device is flagged open even though queue creation
         * failed — presumably to avoid repeated open attempts, but confirm
         * that callers do not treat is_open as "queue is usable". */
        aq_plugin_p->is_open = 1;
        return;
    }

    // NOTE(review): allocation failures (rv != 0) are silently ignored here.
    for (i = 0; i < AQ_NUM_BUFFERS; i++) {
        rv = AudioQueueAllocateBuffer(aq_plugin_p->aq_ref, AQ_BUF_SIZE, &aq_plugin_p->buffers[i]);
    }

    aq_plugin_p->is_open = 1;
}
void CAudioQueueManager::setupQueue() { OSStatus res = AudioQueueNewOutput(&_dataFormat, HandleOutputBuffer, this, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &_queue); for (int i = 0; i < kNumberBuffers; i++) { res = AudioQueueAllocateBuffer(_queue, _bytesPerFrame, &_buffers[i]); HandleOutputBuffer(this, _queue, _buffers[i]); } if (_autoStart) { _isRunning = true; res = AudioQueueStart(_queue, NULL); } _isInitialized = true; }
int playbuffer(void *pcmbuffer, unsigned long len) { AQCallbackStruct aqc; UInt32 err, bufferSize; int i; aqc.mDataFormat.mSampleRate = SAMPLE_RATE; aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM; aqc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; aqc.mDataFormat.mBytesPerPacket = 4; aqc.mDataFormat.mFramesPerPacket = 1; aqc.mDataFormat.mBytesPerFrame = 4; aqc.mDataFormat.mChannelsPerFrame = 2; aqc.mDataFormat.mBitsPerChannel = 16; aqc.frameCount = FRAME_COUNT; aqc.sampleLen = len / BYTES_PER_SAMPLE; aqc.playPtr = 0; aqc.pcmBuffer = (sampleFrame *)pcmbuffer; err = AudioQueueNewOutput(&aqc.mDataFormat, AQBufferCallback, &aqc, NULL, kCFRunLoopCommonModes, 0, &aqc.queue); if (err) return err; aqc.frameCount = FRAME_COUNT; bufferSize = aqc.frameCount * aqc.mDataFormat.mBytesPerFrame; for (i=0; i<AUDIO_BUFFERS; i++) { err = AudioQueueAllocateBuffer(aqc.queue, bufferSize, &aqc.mBuffers[i]); if (err) return err; AQBufferCallback(&aqc, aqc.queue, aqc.mBuffers[i]); } err = AudioQueueStart(aqc.queue, NULL); if (err) return err; struct timeval tv = {1.0, 0}; while(aqc.playPtr < aqc.sampleLen) { select(0, NULL, NULL, NULL, &tv); } sleep(1); return 0; }
/** @internal @This creates a new audioqueue * @param upipe description structure of the pipe * @param flow description structure of the flow * @return an error code */ static int upipe_osx_audioqueue_sink_set_flow_def(struct upipe *upipe, struct uref *flow) { OSStatus status; uint64_t sample_rate = 0; /* hush gcc */ uint8_t channels = 0; uint8_t sample_size = 0; struct AudioStreamBasicDescription fmt; struct upipe_osx_audioqueue_sink *osx_audioqueue = upipe_osx_audioqueue_sink_from_upipe(upipe); if (unlikely(osx_audioqueue->queue)) { upipe_osx_audioqueue_sink_remove(upipe); } /* retrieve flow format information */ uref_sound_flow_get_rate(flow, &sample_rate); uref_sound_flow_get_sample_size(flow, &sample_size); uref_sound_flow_get_channels(flow, &channels); /* build format description */ memset(&fmt, 0, sizeof(struct AudioStreamBasicDescription)); fmt.mSampleRate = sample_rate; fmt.mFormatID = kAudioFormatLinearPCM; fmt.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; fmt.mFramesPerPacket = 1; fmt.mChannelsPerFrame = channels; fmt.mBytesPerPacket = fmt.mBytesPerFrame = sample_size * channels; fmt.mBitsPerChannel = sample_size * 8; /* create queue */ status = AudioQueueNewOutput(&fmt, upipe_osx_audioqueue_sink_cb, upipe, NULL, kCFRunLoopCommonModes, 0, &osx_audioqueue->queue); if (unlikely(status == kAudioFormatUnsupportedDataFormatError)) { upipe_warn(upipe, "unsupported data format"); return UBASE_ERR_EXTERNAL; } /* change volume */ AudioQueueSetParameter(osx_audioqueue->queue, kAudioQueueParam_Volume, osx_audioqueue->volume); /* start queue ! */ AudioQueueStart(osx_audioqueue->queue, NULL); upipe_notice_va(upipe, "audioqueue started (%uHz, %hhuch, %db)", sample_rate, channels, sample_size*8); return UBASE_ERR_NONE; }
int app_OpenSound(int buffersize) { Float64 sampleRate = 22050.0; int i; UInt32 bufferBytes; soundBufferSize = buffersize; app_MuteSound(); soundInit = 0; in.mDataFormat.mSampleRate = sampleRate; in.mDataFormat.mFormatID = kAudioFormatLinearPCM; in.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; in.mDataFormat.mBytesPerPacket = 4; in.mDataFormat.mFramesPerPacket = isStereo ? 1 : 2; in.mDataFormat.mBytesPerFrame = isStereo ? 4 : 2; in.mDataFormat.mChannelsPerFrame = isStereo ? 2 : 1; in.mDataFormat.mBitsPerChannel = 16; /* Pre-buffer before we turn on audio */ UInt32 err; err = AudioQueueNewOutput(&in.mDataFormat, AQBufferCallback, NULL, NULL, kCFRunLoopCommonModes, 0, &in.queue); bufferBytes = AUDIO_BUFFER_SIZE; for (i=0; i<AUDIO_BUFFERS; i++) { err = AudioQueueAllocateBuffer(in.queue, bufferBytes, &in.mBuffers[i]); /* "Prime" by calling the callback once per buffer */ //AQBufferCallback (&in, in.queue, in.mBuffers[i]); in.mBuffers[i]->mAudioDataByteSize = AUDIO_BUFFER_SIZE; //samples_per_frame * 2; //inData->mDataFormat.mBytesPerFrame; //(inData->frameCount * 4 < (sndOutLen) ? inData->frameCount * 4 : (sndOutLen)); AudioQueueEnqueueBuffer(in.queue, in.mBuffers[i], 0, NULL); } soundInit = 1; err = AudioQueueStart(in.queue, NULL); return 0; }
bool QueueAudioData::init() { OSStatus status = 0; // // Setup the audio device. // AudioStreamBasicDescription deviceFormat; deviceFormat.mSampleRate = 44100; deviceFormat.mFormatID = kAudioFormatLinearPCM; deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat; deviceFormat.mBytesPerPacket = 8; deviceFormat.mFramesPerPacket = 1; deviceFormat.mBytesPerFrame = 8; deviceFormat.mChannelsPerFrame = 2; deviceFormat.mBitsPerChannel = 32; deviceFormat.mReserved = 0; // // Create a new output AudioQueue // for the device. // status = AudioQueueNewOutput(&deviceFormat, // Format processAudio, // Callback this, // User data, passed to the callback CFRunLoopGetMain(), // RunLoop 0, // kCFRunLoopDefaultMode, // RunLoop mode 0, // Flags ; must be zero (per documentation)... &audioQueue); // Output Float32 volume = 1.0; AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, volume); // // Allocate buffers for the AudioQueue, // and pre-fill them. // static const int size = FRAME_SIZE * sizeof(float) * 2; for (int i = 0; i < NUMBER_OF_BUFFERS; ++i) { AudioQueueBufferRef buffer = 0; AudioQueueAllocateBuffer(audioQueue, size, &buffer); int n = buffer->mAudioDataBytesCapacity; memset(buffer->mAudioData, 0, n); buffer->mAudioDataByteSize = n; AudioQueueEnqueueBuffer(audioQueue, buffer, 0, 0); } initialized = true; return true; }
int SIOpenSound(int buffersize) { SI_SoundIsInit = 0; SI_AudioOffset = 0; if(SI_AQCallbackStruct.queue != 0) AudioQueueDispose(SI_AQCallbackStruct.queue, true); SI_AQCallbackCount = 0; memset(&SI_AQCallbackStruct, 0, sizeof(AQCallbackStruct)); Float64 sampleRate = 22050.0; sampleRate = Settings.SoundPlaybackRate; SI_SoundBufferSizeBytes = buffersize; SI_AQCallbackStruct.mDataFormat.mSampleRate = sampleRate; SI_AQCallbackStruct.mDataFormat.mFormatID = kAudioFormatLinearPCM; SI_AQCallbackStruct.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; SI_AQCallbackStruct.mDataFormat.mBytesPerPacket = 4; SI_AQCallbackStruct.mDataFormat.mFramesPerPacket = SI_IsStereo ? 1 : 2; SI_AQCallbackStruct.mDataFormat.mBytesPerFrame = SI_IsStereo ? 4 : 2; SI_AQCallbackStruct.mDataFormat.mChannelsPerFrame = SI_IsStereo ? 2 : 1; SI_AQCallbackStruct.mDataFormat.mBitsPerChannel = Settings.SixteenBitSound ? 16: 8; /* Pre-buffer before we turn on audio */ UInt32 err; err = AudioQueueNewOutput(&SI_AQCallbackStruct.mDataFormat, AQBufferCallback, NULL, NULL, kCFRunLoopCommonModes, 0, &SI_AQCallbackStruct.queue); for(int i=0; i<SI_AUDIO_BUFFER_COUNT; i++) { err = AudioQueueAllocateBuffer(SI_AQCallbackStruct.queue, SI_SoundBufferSizeBytes, &SI_AQCallbackStruct.mBuffers[i]); memset(SI_AQCallbackStruct.mBuffers[i]->mAudioData, 0, SI_SoundBufferSizeBytes); SI_AQCallbackStruct.mBuffers[i]->mAudioDataByteSize = SI_SoundBufferSizeBytes; //samples_per_frame * 2; //inData->mDataFormat.mBytesPerFrame; //(inData->frameCount * 4 < (sndOutLen) ? inData->frameCount * 4 : (sndOutLen)); AudioQueueEnqueueBuffer(SI_AQCallbackStruct.queue, SI_AQCallbackStruct.mBuffers[i], 0, NULL); } SI_SoundIsInit = 1; err = AudioQueueStart(SI_AQCallbackStruct.queue, NULL); return 0; }
// AudioFileStream property callback. Once the stream can produce packets,
// builds the playback queue from the parsed data format, registers the
// running-state listener, allocates buffers with packet descriptions, and
// copies the stream's magic cookie onto the queue. BAIL_IF jumps to `bail`
// on any failure.
void AudioStreamDecoder::PropertyCallback(AudioFileStreamID stream, AudioFileStreamPropertyID property, UInt32* flags)
{
    // Only the ready-to-produce-packets transition is of interest here.
    if (property != kAudioFileStreamProperty_ReadyToProducePackets)
        return;

    long err;
    void* buffer = NULL;   // magic-cookie scratch buffer; freed at bail
    unsigned char writable;
    AudioStreamBasicDescription desc = {0};
    UInt32 size = sizeof(desc);

    BAIL_IF(!stream || stream != mStream, "Invalid stream %p\n", stream);

    err = AudioFileStreamGetProperty(mStream, kAudioFileStreamProperty_DataFormat, &size, &desc);
    BAIL_IF(err, "AudioFileStreamGetProperty returned %ld\n", err);

    // NULL run loop: queue callbacks are delivered on an internal thread.
    err = AudioQueueNewOutput(&desc, StaticBufferCompleteCallback, this, NULL, NULL, 0, &mQueue);
    BAIL_IF(err, "AudioQueueNewOutput returned %ld\n", err);

    err = AudioQueueAddPropertyListener(mQueue, kAudioQueueProperty_IsRunning, StaticQueueRunningCallback, this);
    BAIL_IF(err, "AudioQueueAddPropertyListener returned %ld\n", err);

    for (int i = 0; i < kBufferCount; i++)
    {
        err = AudioQueueAllocateBufferWithPacketDescriptions(mQueue, kBufferSize, kBufferPacketDescs, mBuffers + i);
        BAIL_IF(err, "AudioQueueAllocateBuffer returned %ld\n", err);
    }

    mCurrentBuffer = mBuffers;
    (*mCurrentBuffer)->mUserData = this;

    // Fetch the codec magic cookie from the stream and hand it to the queue.
    err = AudioFileStreamGetPropertyInfo(mStream, kAudioFileStreamProperty_MagicCookieData, &size, &writable);
    BAIL_IF(err, "AudioFileStreamGetPropertyInfo returned %ld\n", err);

    buffer = malloc(size);
    BAIL_IF(!buffer, "Failed to allocate %u byte buffer for cookie\n", (unsigned int)size);

    err = AudioFileStreamGetProperty(mStream, kAudioFileStreamProperty_MagicCookieData, &size, buffer);
    BAIL_IF(err, "AudioFileStreamGetProperty returned %ld\n", err);

    err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_MagicCookie, buffer, size);
    BAIL_IF(err, "AudioQueueSetProperty returned %ld\n", err);

bail:
    // free(NULL) is a no-op, so this is safe on every path.
    free(buffer);
}
// AudioFileStream property dispatcher. DataFormat creates the playback queue
// (exactly once), MagicCookieData is cached for later, and
// ReadyToProducePackets applies the cached cookie and marks the player ready
// to start.
void DZAudioQueuePlayer::onProperty(AudioFileStreamPropertyID pID)
{
    UInt32 propertySize = 0;
    switch (pID) {

    // Create audio queue with given data format.
    case kAudioFileStreamProperty_DataFormat:
        propertySize = sizeof(this->_format);
        if (dzDebugOK(AudioFileStreamGetProperty(this->_parser, pID, &(propertySize), &(this->_format)),
                      "Fail to get audio file stream property: DataFormat.")) {
            if (this->_queue != NULL) {
                // A second DataFormat event would mean a duplicated queue.
                dzDebug(!noErr, "Audio file stream duplicated data format.");
            } else {
                if (dzDebugError(AudioQueueNewOutput(&(this->_format), QueueCallback, this,
                                                     CFRunLoopGetCurrent(), kCFRunLoopCommonModes,
                                                     0, &(this->_queue)),
                                 "Create new output audio queue failed.")) {
                    this->_queue = NULL;  // keep the failure detectable
                }
            }
        }
        break;

    // Extract magic cookie data.
    case kAudioFileStreamProperty_MagicCookieData:
        if (noErr == AudioFileStreamGetPropertyInfo(this->_parser, pID, &(propertySize), NULL)) {
            this->_magicCookie = malloc(propertySize);
            this->_magicCookieSize = propertySize;
            if (this->_magicCookie != NULL
                && dzDebugError(AudioFileStreamGetProperty(this->_parser, pID, &(propertySize), this->_magicCookie),
                                "Fail to get audio file stream property: MagicCookieData.")) {
                // On failure drop the cached cookie entirely.
                free(this->_magicCookie);
                this->_magicCookie = NULL;
                this->_magicCookieSize = 0;
            }
        }
        break;

    // Set magic cookie data if any. (Queue shall be already created.)
    case kAudioFileStreamProperty_ReadyToProducePackets:
        if (this->_queue != NULL && this->_magicCookie != NULL) {
            dzDebug(AudioQueueSetProperty(this->_queue, kAudioQueueProperty_MagicCookie,
                                          this->_magicCookie, this->_magicCookieSize),
                    "Fail to set audio queue property: MagicCookie.");
        }
        if (this->_queue != NULL && this->_parser != NULL) {
            this->_status = DZAudioQueuePlayerStatus_ReadyToStart;
        }
        break;

    default:
        break;
    }
}
int app_OpenSound() { Float64 sampleRate = 44100.0; int i; UInt32 bufferBytes; Uint32 err; app_MuteSound(); soundInit = 0; if(!config.enable_sound) return 0; in.mDataFormat.mSampleRate = sampleRate; in.mDataFormat.mFormatID = kAudioFormatLinearPCM; in.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; in.mDataFormat.mBytesPerPacket = 4; in.mDataFormat.mFramesPerPacket = 2; in.mDataFormat.mBytesPerFrame = 2; in.mDataFormat.mChannelsPerFrame = 1; in.mDataFormat.mBitsPerChannel = 16; // Pre-buffer before we turn on audio err = AudioQueueNewOutput(&in.mDataFormat, AQBufferCallback, NULL, NULL, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode, 0, &in.queue); bufferBytes = IPHONE_AUDIO_BUFFER_SIZE; for(i = 0; i < IPHONE_AUDIO_BUFFERS; i++) { err = AudioQueueAllocateBuffer(in.queue, bufferBytes, &in.mBuffers[i]); in.mBuffers[i]->mAudioDataByteSize = IPHONE_AUDIO_BUFFER_SIZE; // "Prime" by calling the callback once per buffer AudioQueueEnqueueBuffer(in.queue, in.mBuffers[i], 0, NULL); } soundInit = 1; err = AudioQueueStart(in.queue, NULL); return 0; }
// Audio worker thread: configures a 16-bit signed stereo PCM queue, primes
// kNumBuffers buffers through the render callback, services the run loop
// until it is stopped, then tears the queue down. Returns 0 on exit.
u32 AudioPluginOSX::AudioThread(void * arg)
{
    AudioPluginOSX * plugin = static_cast<AudioPluginOSX *>(arg);

    AudioStreamBasicDescription format;
    format.mSampleRate       = kOutputFrequency;
    format.mFormatID         = kAudioFormatLinearPCM;
    format.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    format.mBitsPerChannel   = 8 * sizeof(s16);
    format.mChannelsPerFrame = kNumChannels;
    format.mBytesPerFrame    = sizeof(s16) * kNumChannels;
    format.mFramesPerPacket  = 1;
    format.mBytesPerPacket   = format.mBytesPerFrame * format.mFramesPerPacket;
    format.mReserved         = 0;

    AudioQueueRef       queue;
    AudioQueueBufferRef buffers[kNumBuffers];
    AudioQueueNewOutput(&format, &AudioCallback, plugin, CFRunLoopGetCurrent(),
                        kCFRunLoopCommonModes, 0, &queue);

    for (u32 i = 0; i < kNumBuffers; ++i)
    {
        AudioQueueAllocateBuffer(queue, kAudioQueueBufferLength, &buffers[i]);
        buffers[i]->mAudioDataByteSize = kAudioQueueBufferLength;
        // Prime each buffer through the render callback before starting.
        AudioCallback(plugin, queue, buffers[i]);
    }

    AudioQueueStart(queue, NULL);

    CFRunLoopRun();

    AudioQueueStop(queue, false);

    // BUG FIX: the buffers were previously freed *after* AudioQueueDispose,
    // but disposing a queue invalidates it (and releases its buffers), so the
    // AudioQueueFreeBuffer calls operated on a dead queue reference. Free the
    // buffers first, then dispose the queue.
    for (u32 i = 0; i < kNumBuffers; ++i)
    {
        AudioQueueFreeBuffer(queue, buffers[i]);
        buffers[i] = NULL;
    }
    AudioQueueDispose(queue, false);

    return 0;
}
bool IPhoneSoundDevice::Init() { // Initialize the default audio session object to tell it // to allow background music, and to tell us when audio // gets resumed (like if a phone call comes in, iphone takes // over audio. If the user then ignores the phone call, the // audio needs to be turned on again. AudioSessionInitialize(NULL, NULL, wi::InterruptionListener, this); UInt32 category = kAudioSessionCategory_UserInterfaceSoundEffects; AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category); AudioSessionSetActive(true); // Set up streaming AudioStreamBasicDescription desc; desc.mSampleRate = 8000; desc.mFormatID = kAudioFormatLinearPCM; desc.mFormatFlags = kAudioFormatFlagIsPacked; desc.mBytesPerPacket = 1; desc.mFramesPerPacket = 1; desc.mBytesPerFrame = 1; desc.mChannelsPerFrame = 1; desc.mBitsPerChannel = 8; OSStatus err = AudioQueueNewOutput(&desc, AudioCallback, this, NULL, kCFRunLoopCommonModes, 0, &m_haq); if (err != 0) { return false; } for (int i = 0; i < kcBuffers; i++) { err = AudioQueueAllocateBuffer(m_haq, kcbBuffer, &m_apaqb[i]); if (err != 0) { return false; } } return true; }
// Creates the playback AudioQueue for an opened audio file and copies the
// file-level decoder state (magic cookie, channel layout) onto the queue,
// then installs a listener so we are notified when the queue stops.
// Returns the most recent OSStatus.
// NOTE(review): AssertNoError(msg, end) is presumably a macro that logs and
// bails to a label on failure -- the "end:" label below is commented out, so
// confirm the macro's actual control flow before relying on early exit here.
OSStatus SetupQueue(BG_FileInfo *inFileInfo)
{
    UInt32 size = 0;
    OSStatus result = AudioQueueNewOutput(&inFileInfo->mFileFormat, QueueCallback, this, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &mQueue);
    AssertNoError("Error creating queue", end);

    // If the file has a magic cookie (codec private data), fetch it and
    // hand it to the queue so the decoder is configured identically.
    size = sizeof(UInt32);
    result = AudioFileGetPropertyInfo (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, NULL);
    if (!result && size) {
        char* cookie = new char [size];
        result = AudioFileGetProperty (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, cookie);
        AssertNoError("Error getting magic cookie", end);
        result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_MagicCookie, cookie, size);
        delete [] cookie;
        AssertNoError("Error setting magic cookie", end);
    }

    // Propagate the file's channel layout (if any) to the queue.
    OSStatus err = AudioFileGetPropertyInfo(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, NULL);
    if (err == noErr && size > 0) {
        AudioChannelLayout *acl = (AudioChannelLayout *)malloc(size);
        result = AudioFileGetProperty(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, acl);
        AssertNoError("Error getting channel layout from file", end);
        result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_ChannelLayout, acl, size);
        free(acl);
        AssertNoError("Error setting channel layout on queue", end);
    }

    // Get notified when the queue transitions between running and stopped.
    result = AudioQueueAddPropertyListener(mQueue, kAudioQueueProperty_IsRunning, QueueStoppedProc, this);
    AssertNoError("Error adding isRunning property listener to queue", end);

    // Reset so that if the queue is stopped mid-buffer we don't dispose it
    // and recreate it from the stopped-callback.
    mMakeNewQueueWhenStopped = false;

    // Apply the saved volume to the new queue.
    result = SetVolume(mVolume);

//end:
    return result;
}
// Prepares the player's audio pipeline: initializes the FIFO and its
// synchronization primitives, allocates the zeroed internal state, describes
// the output format (44.1 kHz interleaved signed 16-bit stereo PCM), creates
// the AudioQueue, and pre-allocates one second's worth of buffers.  The
// queue is not started here; buffers are handed out via free_buffers until
// playback begins.  On queue-creation failure a diagnostic is printed and
// the function returns early.
void audio_init(audio_player_t *player)
{
    TAILQ_INIT(&player->af.q);
    player->af.qlen = 0;
    pthread_mutex_init(&player->af.mutex, NULL);
    pthread_cond_init(&player->af.cond, NULL);

    player->internal_state = malloc(sizeof(state_t));
    state_t *st = (state_t *) player->internal_state;
    bzero(st, sizeof(state_t));

    // Describe the PCM stream; derived fields are computed from the
    // channel count and sample width so they stay consistent.
    AudioStreamBasicDescription *fmt = &st->desc;
    fmt->mSampleRate = 44100;
    fmt->mFormatID = kAudioFormatLinearPCM;
    fmt->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    fmt->mChannelsPerFrame = 2;
    fmt->mFramesPerPacket = 1;
    fmt->mBytesPerFrame = sizeof(short) * fmt->mChannelsPerFrame;
    fmt->mBytesPerPacket = fmt->mBytesPerFrame;
    fmt->mBitsPerChannel = (fmt->mBytesPerFrame * 8) / fmt->mChannelsPerFrame;
    fmt->mReserved = 0;

    // One second of audio per buffer.
    st->buffer_size = fmt->mBytesPerFrame * fmt->mSampleRate;

    if (noErr != AudioQueueNewOutput(&st->desc, audio_callback, player,
                                     NULL, NULL, 0, &st->queue)) {
        fprintf(stderr, "audioqueue error\n");
        return;
    }

    AudioQueueAddPropertyListener(st->queue, kAudioQueueProperty_IsRunning,
                                  stopped_callback, player);

    // Allocate the buffer pool; every buffer starts out on the free list.
    for (int slot = 0; slot < BUFFER_COUNT; ++slot) {
        AudioQueueAllocateBuffer(st->queue, st->buffer_size, &st->buffers[slot]);
        st->free_buffers[slot] = st->buffers[slot];
    }
    st->free_buffer_count = BUFFER_COUNT;

    st->playing = false;
    st->should_stop = false;
}
// Initializes the audio FIFO and the global playback state, creates an
// output AudioQueue for 44.1 kHz interleaved signed 16-bit stereo PCM,
// primes it with silent buffers, and starts it so the callbacks begin
// pulling real audio from the FIFO.  Prints a diagnostic and returns early
// if queue creation fails.
void audio_init(audio_fifo_t *af)
{
    int i;
    TAILQ_INIT(&af->q);
    af->qlen = 0;

    pthread_mutex_init(&af->mutex, NULL);
    pthread_cond_init(&af->cond, NULL);

    bzero(&state, sizeof(state));
    state.desc.mFormatID = kAudioFormatLinearPCM;
    state.desc.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    state.desc.mSampleRate = 44100;
    state.desc.mChannelsPerFrame = 2;
    state.desc.mFramesPerPacket = 1;
    state.desc.mBytesPerFrame = sizeof(short) * state.desc.mChannelsPerFrame;
    state.desc.mBytesPerPacket = state.desc.mBytesPerFrame;
    state.desc.mBitsPerChannel = (state.desc.mBytesPerFrame*8)/state.desc.mChannelsPerFrame;
    state.desc.mReserved = 0;

    state.buffer_size = state.desc.mBytesPerFrame * kSampleCountPerBuffer;

    if (noErr != AudioQueueNewOutput(&state.desc, audio_callback, af, NULL, NULL, 0, &state.queue)) {
        printf("audioqueue error\n");
        return;
    }

    // Start some empty playback so we'll get the callbacks that fill in the actual audio.
    for (i = 0; i < BUFFER_COUNT; ++i) {
        AudioQueueAllocateBuffer(state.queue, state.buffer_size, &state.buffers[i]);
        // BUG FIX: the original enqueued freshly allocated buffers whose
        // audio data was uninitialized memory, which plays as noise until
        // the first callback refills them.  Prime with silence instead.
        memset(state.buffers[i]->mAudioData, 0, state.buffer_size);
        state.buffers[i]->mAudioDataByteSize = state.buffer_size;
        AudioQueueEnqueueBuffer(state.queue, state.buffers[i], 0, NULL);
    }

    if (noErr != AudioQueueStart(state.queue, NULL))
        puts("AudioQueueStart failed");
}
// AudioFileStream property-change handler.  When the parser reports it is
// ready to produce packets, this (re)builds the playback pipeline: reads the
// stream's data format, creates the output AudioQueue, allocates its
// buffers, forwards the codec cookies, and installs an IsRunning listener.
// Any failure records m_lastError and notifies the delegate where relevant.
void Audio_Queue::handlePropertyChange(AudioFileStreamID inAudioFileStream, AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    OSStatus err = noErr;

    AQ_TRACE("found property '%lu%lu%lu%lu'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

    switch (inPropertyID) {
        case kAudioFileStreamProperty_ReadyToProducePackets:
        {
            cleanup();

            // the file stream parser is now ready to produce audio packets.
            // get the stream format.
            memset(&m_streamDesc, 0, sizeof(m_streamDesc));
            UInt32 asbdSize = sizeof(m_streamDesc);
            err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &m_streamDesc);
            if (err) {
                AQ_TRACE("%s: error in kAudioFileStreamProperty_DataFormat\n", __PRETTY_FUNCTION__);
                m_lastError = err;
                break;
            }

            // create the audio queue
            err = AudioQueueNewOutput(&m_streamDesc, audioQueueOutputCallback, this, CFRunLoopGetCurrent(), NULL, 0, &m_outAQ);
            if (err) {
                AQ_TRACE("%s: error in AudioQueueNewOutput\n", __PRETTY_FUNCTION__);
                if (m_delegate) {
                    m_delegate->audioQueueInitializationFailed();
                }
                m_lastError = err;
                break;
            }

            // allocate audio queue buffers
            for (unsigned int i = 0; i < AQ_BUFFERS; ++i) {
                err = AudioQueueAllocateBuffer(m_outAQ, AQ_BUFSIZ, &m_audioQueueBuffer[i]);
                if (err) {
                    /* If allocating the buffers failed, everything else will fail, too.
                     * Dispose the queue so that we can later on detect that this
                     * queue in fact has not been initialized. */
                    AQ_TRACE("%s: error in AudioQueueAllocateBuffer\n", __PRETTY_FUNCTION__);
                    (void)AudioQueueDispose(m_outAQ, true);
                    m_outAQ = 0;
                    if (m_delegate) {
                        m_delegate->audioQueueInitializationFailed();
                    }
                    m_lastError = err;
                    break;  // exits only the for loop, see guard below
                }
            }

            // BUG FIX: the `break` above leaves only the for loop; the
            // original then fell through to setCookiesForStream() and
            // AudioQueueAddPropertyListener() with a disposed, zeroed queue.
            // Bail out of the whole case if the queue was rolled back.
            if (m_outAQ == 0) {
                break;
            }

            setCookiesForStream(inAudioFileStream);

            // listen for kAudioQueueProperty_IsRunning
            err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
            if (err) {
                AQ_TRACE("%s: error in AudioQueueAddPropertyListener\n", __PRETTY_FUNCTION__);
                m_lastError = err;
                break;
            }

            break;
        }
    }
}
// Opens the audio file at `url` and prepares aqData for playback, closely
// following Apple's Audio Queue playback example: read the data format,
// create the output queue, size the read buffers from the upper-bound
// packet size, allocate packet descriptions when the format is VBR, and
// copy the magic cookie (codec private data) onto the queue.
// Returns false only if the file cannot be opened; later failures are
// reported through checkStatus() but do not abort -- NOTE(review): confirm
// that is intended, since a failed AudioQueueNewOutput leaves aqData.mQueue
// NULL for subsequent calls.
bool load(CFURLRef url)
{
    OSStatus status;
    memset(&aqData,0,sizeof(aqData));
    timeBase = 0;

    status = AudioFileOpenURL(url,kAudioFileReadPermission,0,&aqData.mAudioFile);
    checkStatus(status);
    if( status != noErr ) return false;

    // Ask the file for its stream format (sample rate, codec, etc.).
    UInt32 dataFormatSize = sizeof (aqData.mDataFormat);
    status = AudioFileGetProperty (
        aqData.mAudioFile,
        kAudioFilePropertyDataFormat,
        &dataFormatSize,
        &aqData.mDataFormat
    );
    checkStatus(status);

    // Create the playback queue on the current run loop.
    status = AudioQueueNewOutput (
        &aqData.mDataFormat,
        HandleOutputBuffer,
        &aqData,
        CFRunLoopGetCurrent (),
        kCFRunLoopCommonModes,
        0,
        &aqData.mQueue
    );
    checkStatus(status);

    // Derive a buffer size that holds ~0.5 s of audio, using the largest
    // packet the file could contain as the lower bound.
    UInt32 maxPacketSize;
    UInt32 propertySize = sizeof (maxPacketSize);
    status = AudioFileGetProperty (
        aqData.mAudioFile,
        kAudioFilePropertyPacketSizeUpperBound,
        &propertySize,
        &maxPacketSize
    );
    checkStatus(status);

    deriveBufferSize (
        aqData.mDataFormat,
        maxPacketSize,
        0.5,
        &aqData.bufferByteSize,
        &aqData.mNumPacketsToRead
    );

    // VBR formats report 0 bytes/packet or 0 frames/packet; they need
    // per-packet descriptions when enqueuing.  CBR formats do not.
    bool isFormatVBR = (
        aqData.mDataFormat.mBytesPerPacket == 0 ||
        aqData.mDataFormat.mFramesPerPacket == 0
    );

    if (isFormatVBR) {
        aqData.mPacketDescs =
        (AudioStreamPacketDescription*) malloc (
            aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription)
        );
    } else {
        aqData.mPacketDescs = NULL;
    }

    // If the file carries a magic cookie (codec private data), copy it to
    // the queue so the decoder matches the encoder's configuration.
    UInt32 cookieSize = sizeof (UInt32);
    OSStatus couldNotGetProperty =
    AudioFileGetPropertyInfo (
        aqData.mAudioFile,
        kAudioFilePropertyMagicCookieData,
        &cookieSize,
        NULL
    );
    // checkStatus(couldNotGetProperty);
    if (!couldNotGetProperty && cookieSize) {
        char* magicCookie = (char *) malloc (cookieSize);

        status = AudioFileGetProperty (
            aqData.mAudioFile,
            kAudioFilePropertyMagicCookieData,
            &cookieSize,
            magicCookie
        );
        checkStatus(status);

        status = AudioQueueSetProperty (
            aqData.mQueue,
            kAudioQueueProperty_MagicCookie,
            magicCookie,
            cookieSize
        );
        checkStatus(status);

        free (magicCookie);
    }

    return true;
}
// Command-line player: opens the audio file named on the command line,
// creates an output AudioQueue sized from the file's maximum packet size,
// primes one buffer, and runs the current run loop until playback-driven
// callbacks finish.  Returns 0 on success, 1 on any error (all error paths
// funnel through the `error:` label so `state` is cleaned up exactly once).
int main (int argc, const char *argv[])
{
    OSStatus result;
    State *state = NULL;

    if (argc != 2) {
        printf("Usage: play <file>\n");
        goto error;
    }

    state = StateCreate();

    //
    // open the audio file
    //
    CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(
        kCFAllocatorDefault,
        (const UInt8*)argv[1],
        strlen(argv[1]),
        false
    );
    if (!fileURL) {
        Error("Invalid filename");
        goto error;
    }

    result = AudioFileOpenURL(
        fileURL,
        kAudioFileReadPermission,
        0,
        &state->audioFile
    );
    CFRelease(fileURL);
    if (result) {
        Error("Invalid audio file");
        goto error;
    }

    //
    // determine properties of stream including maximum packet size
    //
    UInt32 propertyDataSize;

    AudioStreamBasicDescription streamDescription;
    propertyDataSize = sizeof(streamDescription);
    result = AudioFileGetProperty(
        state->audioFile,
        kAudioFilePropertyDataFormat,
        &propertyDataSize,
        &streamDescription
    );
    if (result) {
        Error(NULL);
        goto error;
    }

    UInt32 maximumPacketSize;
    propertyDataSize = sizeof(maximumPacketSize);
    result = AudioFileGetProperty(
        state->audioFile,
        kAudioFilePropertyMaximumPacketSize,
        &propertyDataSize,
        &maximumPacketSize
    );
    if (result) {
        Error(NULL);
        goto error;
    }

    //
    // initialize the audio queue and allocate buffers (with packet
    // descriptions, so VBR formats work too)
    //
    result = AudioQueueNewOutput(
        &streamDescription,
        AudioQueueOutput,
        state,
        CFRunLoopGetCurrent(),
        kCFRunLoopDefaultMode,
        0,
        &state->audioQueue
    );
    if (result) {
        Error("Failed to initialize audio queue");
        goto error;
    }

    state->audioBufferSize = maximumPacketSize * PACKETS_PER_BUFFER;

    AudioQueueBufferRef audioQueueBuffers[NUM_BUFFERS];
    for (int i = 0; i < NUM_BUFFERS; ++i) {
        result = AudioQueueAllocateBufferWithPacketDescriptions(
            state->audioQueue,
            state->audioBufferSize,
            PACKETS_PER_BUFFER,
            &audioQueueBuffers[i]
        );
        if (result) {
            Error("Failed to initialize audio queue buffer");
            goto error;
        }
    }

    // NOTE(review): the listener result is deliberately unchecked here;
    // playback can proceed without the IsRunning notification.
    result = AudioQueueAddPropertyListener(
        state->audioQueue,
        kAudioQueueProperty_IsRunning,
        AudioQueuePropertyListener,
        NULL
    );

    //
    // prime and start the audio queue.  Only the first buffer is primed
    // directly; presumably AudioQueueOutput() enqueues further buffers as
    // it is called back -- confirm against its definition.
    //
    AudioQueueOutput(
        state,
        state->audioQueue,
        audioQueueBuffers[0]
    );
    result = AudioQueueStart(
        state->audioQueue,
        NULL
    );
    if (result) {
        Error("Failed to start audio queue");
        goto error;
    }

    //
    // start the run loop that will dispatch audio queue callbacks
    //
    CFRunLoopRun();

    StateDestroy(state);
    return 0;

error:

    if (state) {
        StateDestroy(state);
    }
    return 1;
}
// Plays the file at kPlaybackFileLocation through an output AudioQueue:
// reads the file's data format, creates the queue, sizes buffers for about
// half a second of audio, primes them by invoking the output callback
// directly, starts the queue, and pumps the run loop until the callback
// marks the file fully enqueued.  CheckError() aborts with the given
// message on any non-zero OSStatus.
int main(int argc, const char *argv[])
{
    MyPlayer player = {0};

    CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, kPlaybackFileLocation, kCFURLPOSIXPathStyle, false);

    // open the audio file
    //	CheckError(AudioFileOpenURL(myFileURL, fsRdPerm, 0, &player.playbackFile), "AudioFileOpenURL failed");
    CheckError(AudioFileOpenURL(myFileURL, kAudioFileReadPermission, 0, &player.playbackFile), "AudioFileOpenURL failed");
    CFRelease(myFileURL);

    // get the audio data format from the file
    AudioStreamBasicDescription dataFormat;
    UInt32 propSize = sizeof(dataFormat);
    CheckError(AudioFileGetProperty(player.playbackFile, kAudioFilePropertyDataFormat,
                                    &propSize, &dataFormat), "couldn't get file's data format");

    // create a output (playback) queue
    AudioQueueRef queue;
    CheckError(AudioQueueNewOutput(&dataFormat, // ASBD
                                   MyAQOutputCallback, // Callback
                                   &player, // user data
                                   NULL, // run loop
                                   NULL, // run loop mode
                                   0, // flags (always 0)
                                   &queue), // output: reference to AudioQueue object
               "AudioQueueNewOutput failed");

    // adjust buffer size to represent about a half second (0.5) of audio based on this format
    UInt32 bufferByteSize;
    CalculateBytesForTime(player.playbackFile, dataFormat,  0.5, &bufferByteSize, &player.numPacketsToRead);

    // check if we are dealing with a VBR file. ASBDs for VBR files always have
    // mBytesPerPacket and mFramesPerPacket as 0 since they can fluctuate at any time.
    // If we are dealing with a VBR file, we allocate memory to hold the packet descriptions
    bool isFormatVBR = (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0);
    if (isFormatVBR)
        player.packetDescs = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * player.numPacketsToRead);
    else
        player.packetDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)

    // get magic cookie from file and set on queue
    MyCopyEncoderCookieToQueue(player.playbackFile, queue);

    // allocate the buffers and prime the queue with some data before starting
    AudioQueueBufferRef	buffers[kNumberPlaybackBuffers];
    player.isDone = false;
    player.packetPosition = 0;
    int i;
    for (i = 0; i < kNumberPlaybackBuffers; ++i)
    {
        CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffers[i]), "AudioQueueAllocateBuffer failed");

        // manually invoke callback to fill buffers with data
        MyAQOutputCallback(&player, queue, buffers[i]);

        // EOF (the entire file's contents fit in the buffers)
        if (player.isDone)
            break;
    }

    //CheckError(AudioQueueAddPropertyListener(aqp.queue, kAudioQueueProperty_IsRunning, MyAQPropertyListenerCallback, &aqp), "AudioQueueAddPropertyListener(kAudioQueueProperty_IsRunning) failed");

    // start the queue. this function returns immedatly and begins
    // invoking the callback, as needed, asynchronously.
    CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");

    // and wait
    printf("Playing...\n");
    do
    {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
    } while (!player.isDone /*|| gIsRunning*/);

    // isDone represents the state of the Audio File enqueuing. This does not
    // mean the Audio Queue is actually done playing yet.  Since we have
    // 3 half-second buffers in-flight, continue to run the loop for a short
    // additional time so they can be processed.
    CFRunLoopRunInMode(kCFRunLoopDefaultMode, 2, false);

    // end playback
    player.isDone = true;
    CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");

cleanup:
    AudioQueueDispose(queue, TRUE);
    AudioFileClose(player.playbackFile);

    return 0;
}