// Copies the stream's magic cookie (codec configuration data) from the file
// stream parser to the output audio queue (m_outAQ). Best-effort: failures
// are traced and the queue is simply left without a cookie.
void Audio_Queue::setCookiesForStream(AudioFileStreamID inAudioFileStream)
{
    OSStatus err;
    
    // get the cookie size
    UInt32 cookieSize;
    Boolean writable;
    
    err = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, &writable);
    if (err) {
        AQ_TRACE("error in info kAudioFileStreamProperty_MagicCookieData\n");
        return;
    }
    // FIX: UInt32 is a 32-bit unsigned int; %lu expects unsigned long and is
    // undefined behavior on LP64 — use %u with an explicit cast.
    AQ_TRACE("cookieSize %u\n", (unsigned int)cookieSize);
    
    if (cookieSize == 0) {
        // No cookie to transfer (e.g. uncompressed formats).
        return;
    }
    
    // get the cookie data
    void* cookieData = calloc(1, cookieSize);
    if (!cookieData) {
        AQ_TRACE("failed to allocate magic cookie buffer\n");
        return;
    }
    err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, cookieData);
    if (err) {
        AQ_TRACE("error in get kAudioFileStreamProperty_MagicCookieData\n");
        free(cookieData);
        return;
    }
    
    // set the cookie on the queue.
    err = AudioQueueSetProperty(m_outAQ, kAudioQueueProperty_MagicCookie, cookieData, cookieSize);
    free(cookieData);
    if (err) {
        AQ_TRACE("error in set kAudioQueueProperty_MagicCookie\n");
    }
}
Example #2
0
// Creates, primes and starts an output AudioQueue configured for Apple
// Lossless (ALAC) playback.
//
// cookie      - ALAC magic cookie from the stream; fields are big-endian,
//               hence the ntohl() conversions below.
// playerInfo  - player state; receives the created queue in playerInfo->queue.
// buffer_size - byte capacity of each queue buffer.
// num_buffers - number of buffers to allocate and pre-fill via c_callback.
// num_packets - packet-description capacity per buffer.
//
// Returns 0 on success, otherwise the first OSStatus error encountered.
int32_t setup_queue(
	ALACMagicCookie cookie,
	PlayerInfo *playerInfo,
	uint32_t buffer_size,
	uint32_t num_buffers,
	uint32_t num_packets
) {
  // Describe the compressed ALAC stream. Packet sizes vary, so
  // bytes-per-packet/frame and bits-per-channel are 0 and packet
  // descriptions must accompany every enqueue.
  AudioStreamBasicDescription inFormat = {0};
  inFormat.mSampleRate = ntohl(cookie.sampleRate);
  inFormat.mFormatID = kAudioFormatAppleLossless;
  inFormat.mFormatFlags = 0; // ALAC uses no flags
  inFormat.mBytesPerPacket = 0; // Variable size (must use AudioStreamPacketDescription)
  inFormat.mFramesPerPacket = ntohl(cookie.frameLength);
  inFormat.mBytesPerFrame = 0; // Compressed
  inFormat.mChannelsPerFrame = 2; // Stereo TODO: get from fmtp?
  inFormat.mBitsPerChannel = 0; // Compressed
  inFormat.mReserved = 0;

  OSStatus err = AudioQueueNewOutput(
      &inFormat,
      c_callback,
      playerInfo, // User data
      NULL, // Run on audio queue's thread
      NULL, // Callback run loop's mode
      0, // Reserved
      &playerInfo->queue);

  if (err) return err;

  // Need to set the magic cookie too (tail fmtp)
  err = AudioQueueSetProperty(playerInfo->queue, kAudioQueueProperty_MagicCookie,
      &cookie, sizeof(ALACMagicCookie));
  if (err) return err;

  // Create input buffers, and enqueue using callback.
  // FIX: loop index is uint32_t to match num_buffers (was a signed/unsigned
  // comparison).
  for (uint32_t i = 0; i < num_buffers; i++) {
    AudioQueueBufferRef buffer;
    err = AudioQueueAllocateBufferWithPacketDescriptions(
        playerInfo->queue, buffer_size, num_packets, &buffer);
    if (err) return err;

    // Pre-fill the buffer exactly as the queue itself would.
    c_callback(playerInfo, playerInfo->queue, buffer);
  }

  // Volume full
  err = AudioQueueSetParameter(playerInfo->queue, kAudioQueueParam_Volume, 1.0);
  if (err) return err;

  // Prime
  err = AudioQueuePrime(playerInfo->queue, 0, NULL);
  if (err) return err;

  // Start
  err = AudioQueueStart(playerInfo->queue, NULL);
  if (err) return err;

  return 0;
}
Example #3
0
 // Streaming music object backed by an ifdstream (custom VFS). Opens the CAF
 // file through read callbacks, creates the output queue, sizes the read
 // buffers, applies the magic cookie if present, then seeks/primes playback.
 music_obj<audio_queue_driver>::music_obj(const boost::shared_ptr<ifdstream>& ifd, bool loop, float gain,
                                          float start, float end)
 : packet_index_(0)
 , start_packet_index_(0)
 , stop_packet_index_(0)
 , volume_(gain)
 , loop_(loop)
 , is_paused_(false)
 , ifd_(ifd)
 {        
     LOG("Got ifdstream from path..");
     
     OSStatus res = AudioFileOpenWithCallbacks(this, &music_obj::af_read_cb, &music_obj::af_write_cb,
                         &music_obj::af_get_size_cb, &music_obj::af_set_size_cb, 
                             kAudioFileCAFType, &audio_file_);
     
     if(res)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file in liverpool fs. AudioFile returned " 
                                 + boost::lexical_cast<std::string>(res));
     }
     
     UInt32 size = sizeof(data_format_);
     AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);
     
     AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);        
     AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);
     
     // VBR formats report no fixed packet/frame size, so read by the file's
     // upper-bound packet size and keep per-packet descriptions.
     if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
     {
         size = sizeof(max_packet_size_);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
         if (max_packet_size_ > BUFFER_SIZE_BYTES)
         {
             max_packet_size_ = BUFFER_SIZE_BYTES;
         }
         
         num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
         packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
     }
     else
     {
         num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
         packet_descriptions_ = NULL;
     }
     
     // FIX: the OSStatus of AudioFileGetPropertyInfo was previously ignored
     // and `size` kept a stale value from the queries above, so a failed
     // lookup could push a garbage cookie onto the queue. Reset `size` and
     // only proceed when the query succeeds.
     size = 0;
     OSStatus cookie_res = AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL);
     if (cookie_res == noErr && size > 0)
     {
         char* cookie = (char*)malloc(sizeof(char) * size);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
         AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
         free(cookie);
     }
     
     calculate_seek(start, end);        
     volume(volume_);
     prime();   
 }
// AudioFileStream property callback: when the parser signals it is ready to
// produce packets, read the stream format, create the output queue and its
// buffers, transfer the magic cookie (best-effort), and install an
// is-running listener. Errors set myData->failed for the driving code.
void MyPropertyListenerProc(	void *							inClientData,
								AudioFileStreamID				inAudioFileStream,
								AudioFileStreamPropertyID		inPropertyID,
								UInt32 *						ioFlags)
{	
	// this is called by audio file stream when it finds property values
	MyData* myData = (MyData*)inClientData;
	OSStatus err = noErr;

	printf("found property '%c%c%c%c'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

	switch (inPropertyID) {
		case kAudioFileStreamProperty_ReadyToProducePackets :
		{
			// the file stream parser is now ready to produce audio packets.
			// get the stream format.
			AudioStreamBasicDescription asbd;
			UInt32 asbdSize = sizeof(asbd);
			err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &asbd);
			if (err) { PRINTERROR("get kAudioFileStreamProperty_DataFormat"); myData->failed = true; break; }
			
			// create the audio queue
			err = AudioQueueNewOutput(&asbd, MyAudioQueueOutputCallback, myData, NULL, NULL, 0, &myData->audioQueue);
			if (err) { PRINTERROR("AudioQueueNewOutput"); myData->failed = true; break; }
			
			// allocate audio queue buffers
			for (unsigned int i = 0; i < kNumAQBufs; ++i) {
				err = AudioQueueAllocateBuffer(myData->audioQueue, kAQBufSize, &myData->audioQueueBuffer[i]);
				if (err) { PRINTERROR("AudioQueueAllocateBuffer"); myData->failed = true; break; }
			}
			// FIX: the break above only exits the for-loop; previously
			// execution fell through to the cookie/listener setup even after
			// a buffer allocation failure. Stop handling this property now.
			if (myData->failed) break;

			// get the cookie size (cookie transfer is best-effort: a missing
			// cookie is not a fatal error, so `failed` is not set here)
			UInt32 cookieSize;
			Boolean writable;
			err = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, &writable);
			if (err) { PRINTERROR("info kAudioFileStreamProperty_MagicCookieData"); break; }
			printf("cookieSize %d\n", cookieSize);

			// get the cookie data
			void* cookieData = calloc(1, cookieSize);
			err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, cookieData);
			if (err) { PRINTERROR("get kAudioFileStreamProperty_MagicCookieData"); free(cookieData); break; }

			// set the cookie on the queue.
			err = AudioQueueSetProperty(myData->audioQueue, kAudioQueueProperty_MagicCookie, cookieData, cookieSize);
			free(cookieData);
			if (err) { PRINTERROR("set kAudioQueueProperty_MagicCookie"); break; }

			// listen for kAudioQueueProperty_IsRunning
			err = AudioQueueAddPropertyListener(myData->audioQueue, kAudioQueueProperty_IsRunning, MyAudioQueueIsRunningCallback, myData);
			if (err) { PRINTERROR("AudioQueueAddPropertyListener"); myData->failed = true; break; }
			
			break;
		}
	}
}
Example #5
0
		// Creates the playback AudioQueue for inFileInfo's file format and
		// configures it: magic cookie (if the file has one), channel layout
		// (if present), an is-running listener, and the stored volume.
		// Returns the last OSStatus produced (noErr on full success).
		// NOTE(review): AssertNoError presumably jumps to an `end:` label on
		// failure, but the label below is commented out — confirm the macro
		// compiles/behaves as intended in this build.
		OSStatus SetupQueue(BG_FileInfo *inFileInfo)
		{
			UInt32 size = 0;
			OSStatus result = AudioQueueNewOutput(&inFileInfo->mFileFormat, QueueCallback, this, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &mQueue);
					AssertNoError("Error creating queue", end);

			// (2) If the file has a cookie, we should get it and set it on the AQ
			size = sizeof(UInt32);
			result = AudioFileGetPropertyInfo (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, NULL);

			// Only transfer the cookie when the property exists and is non-empty.
			if (!result && size) {
				char* cookie = new char [size];		
				result = AudioFileGetProperty (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, cookie);
					AssertNoError("Error getting magic cookie", end);
				result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_MagicCookie, cookie, size);
				delete [] cookie;
					AssertNoError("Error setting magic cookie", end);
			}

			// channel layout
			OSStatus err = AudioFileGetPropertyInfo(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, NULL);
			if (err == noErr && size > 0) {
				AudioChannelLayout *acl = (AudioChannelLayout *)malloc(size);
				result = AudioFileGetProperty(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, acl);
					AssertNoError("Error getting channel layout from file", end);
				result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_ChannelLayout, acl, size);
				free(acl);
					AssertNoError("Error setting channel layout on queue", end);
			}
			
			// add a notification proc for when the queue stops
			result = AudioQueueAddPropertyListener(mQueue, kAudioQueueProperty_IsRunning, QueueStoppedProc, this);
				AssertNoError("Error adding isRunning property listener to queue", end);
				
			// we need to reset this variable so that if the queue is stopped mid buffer we don't dispose it 
			mMakeNewQueueWhenStopped = false;
			
			// volume
			result = SetVolume(mVolume);
			
		//end:
			return result;
		}
Example #6
0
 // Streaming music object backed by a file on disk. Opens the CAF file by
 // URL, creates the output queue, sizes the read buffers, applies the magic
 // cookie if present, then seeks/primes playback.
 music_obj<audio_queue_driver>::music_obj(const std::string& file_path, bool loop, float gain,
                                          float start, float end)
 : packet_index_(0)
 , start_packet_index_(0)
 , stop_packet_index_(0)
 , volume_(gain)
 , loop_(loop)
 , is_paused_(false)
 {
     CFURLRef file_url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)file_path.c_str(), file_path.size(), false);
     // FIX: CFURLCreateFromFileSystemRepresentation can return NULL, and
     // CFRelease(NULL) crashes — fail fast instead.
     if(!file_url)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file at '" + file_path + "'");
     }
     OSStatus res = AudioFileOpenURL(file_url, kAudioFileReadPermission, kAudioFileCAFType, &audio_file_);
     CFRelease(file_url);
     
     if(res)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file at '" + file_path + "'");
     }
     
     UInt32 size = sizeof(data_format_);
     AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);
     
     AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);        
     AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);
     
     // VBR formats report no fixed packet/frame size, so read by the file's
     // upper-bound packet size and keep per-packet descriptions.
     if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
     {
         size = sizeof(max_packet_size_);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
         if (max_packet_size_ > BUFFER_SIZE_BYTES)
         {
             max_packet_size_ = BUFFER_SIZE_BYTES;
         }
         
         num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
         packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
     }
     else
     {
         num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
         packet_descriptions_ = NULL;
     }
     
     // FIX: the OSStatus of AudioFileGetPropertyInfo was previously ignored
     // and `size` kept a stale value from the queries above, so a failed
     // lookup could push a garbage cookie onto the queue. Reset `size` and
     // only proceed when the query succeeds.
     size = 0;
     OSStatus cookie_res = AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL);
     if (cookie_res == noErr && size > 0)
     {
         char* cookie = (char*)malloc(sizeof(char) * size);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
         AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
         free(cookie);
     }
     
     calculate_seek(start, end);
     volume(volume_);
     prime();
 }
// Many encoded formats (AAC, ALAC, ...) carry decoder configuration in a
// "magic cookie". If the source file provides one, copy it onto the playback
// queue so its decoder can be configured before packets arrive.
static void MyCopyEncoderCookieToQueue(AudioFileID theFile, AudioQueueRef queue ) {
	UInt32 cookieDataSize;
	OSStatus infoStatus = AudioFileGetPropertyInfo (theFile, kAudioFilePropertyMagicCookieData, &cookieDataSize, NULL);
	// A missing or empty cookie is normal (e.g. linear PCM) — nothing to do.
	if (infoStatus != noErr || cookieDataSize == 0)
		return;

	Byte* cookieBytes = (UInt8*)malloc(sizeof(UInt8) * cookieDataSize);
	CheckError(AudioFileGetProperty (theFile, kAudioFilePropertyMagicCookieData, &cookieDataSize, cookieBytes), "get cookie from file failed");
	CheckError(AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, cookieBytes, cookieDataSize), "set cookie on queue failed");
	free(cookieBytes);
}
Example #8
0
// Returns the current output level meter state, lazily enabling level
// metering on the queue the first time it is queried.
AudioQueueLevelMeterState Audio_Queue::levels()
{
    if (!m_levelMeteringEnabled) {
        UInt32 enabledLevelMeter = true;
        AudioQueueSetProperty(m_outAQ,
                              kAudioQueueProperty_EnableLevelMetering,
                              &enabledLevelMeter,
                              sizeof(UInt32));
        
        m_levelMeteringEnabled = true;
    }
    
    // FIX: zero-initialize so callers get silence instead of uninitialized
    // stack contents if the property query below fails.
    AudioQueueLevelMeterState levelMeter = {0};
    UInt32 levelMeterSize = sizeof(AudioQueueLevelMeterState);
    AudioQueueGetProperty(m_outAQ, kAudioQueueProperty_CurrentLevelMeterDB, &levelMeter, &levelMeterSize);
    return levelMeter;
}
// AudioFileStream property callback. Acts only on ReadyToProducePackets:
// reads the stream format, creates the output queue, installs the
// is-running listener, allocates the packet-description buffers, and copies
// the stream's magic cookie to the queue. All failures route through the
// shared `bail:` label, where the cookie buffer (if any) is released —
// free(NULL) is safe for the early-failure paths.
void AudioStreamDecoder::PropertyCallback(AudioFileStreamID stream, AudioFileStreamPropertyID property, UInt32* flags)
{
	if (property != kAudioFileStreamProperty_ReadyToProducePackets)
		return;

	long err;
	void* buffer = NULL;
	unsigned char writable;
	AudioStreamBasicDescription desc = {0};
	UInt32 size = sizeof(desc);

	// Sanity check: this callback must be invoked for our own stream.
	BAIL_IF(!stream || stream != mStream, "Invalid stream %p\n", stream);

	err = AudioFileStreamGetProperty(mStream, kAudioFileStreamProperty_DataFormat, &size, &desc);
	BAIL_IF(err, "AudioFileStreamGetProperty returned %ld\n", err);

	err = AudioQueueNewOutput(&desc, StaticBufferCompleteCallback, this, NULL, NULL, 0, &mQueue);
	BAIL_IF(err, "AudioQueueNewOutput returned %ld\n", err);

	err = AudioQueueAddPropertyListener(mQueue, kAudioQueueProperty_IsRunning, StaticQueueRunningCallback, this);
	BAIL_IF(err, "AudioQueueAddPropertyListener returned %ld\n", err);

	for (int i = 0; i < kBufferCount; i++)
	{
		err = AudioQueueAllocateBufferWithPacketDescriptions(mQueue, kBufferSize, kBufferPacketDescs, mBuffers + i);
		BAIL_IF(err, "AudioQueueAllocateBuffer returned %ld\n", err);
	}

	mCurrentBuffer = mBuffers;
	(*mCurrentBuffer)->mUserData = this;

	// `size` is reused here: it now receives the magic cookie's byte count.
	err = AudioFileStreamGetPropertyInfo(mStream, kAudioFileStreamProperty_MagicCookieData, &size, &writable);
	BAIL_IF(err, "AudioFileStreamGetPropertyInfo returned %ld\n", err);

	buffer = malloc(size);
	BAIL_IF(!buffer, "Failed to allocate %u byte buffer for cookie\n", (unsigned int)size);

	err = AudioFileStreamGetProperty(mStream, kAudioFileStreamProperty_MagicCookieData, &size, buffer);
	BAIL_IF(err, "AudioFileStreamGetProperty returned %ld\n", err);

	err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_MagicCookie, buffer, size);
	BAIL_IF(err, "AudioQueueSetProperty returned %ld\n", err);

bail:
	free(buffer);
}
Example #10
0
// Sets the playback rate on the queue, clamped to the supported range
// [0.5, 2.0]. Time/pitch processing is enabled only for non-unity rates.
void Audio_Queue::setPlayRate(float playRate)
{
    // Nothing to do until the queue exists.
    if (!m_outAQ) {
        return;
    }

    // Decide on time-pitch conversion from the caller's requested rate,
    // before clamping.
    UInt32 enableTimePitchConversion = (playRate != 1.0);

    if (playRate < 0.5) {
        playRate = 0.5;
    } else if (playRate > 2.0) {
        playRate = 2.0;
    }

    AudioQueueSetProperty (m_outAQ, kAudioQueueProperty_EnableTimePitch, &enableTimePitchConversion, sizeof(enableTimePitchConversion));
    AudioQueueSetParameter(m_outAQ, kAudioQueueParam_PlayRate, playRate);
}
Example #11
0
// Handles property notifications from the audio file stream parser:
// DataFormat creates the output queue, MagicCookieData is cached for later,
// and ReadyToProducePackets applies the cached cookie and marks the player
// ready to start.
void DZAudioQueuePlayer::onProperty(AudioFileStreamPropertyID pID)
{
    UInt32 propertySize = 0;
    switch (pID) {
        // Create audio queue with given data format.
        case kAudioFileStreamProperty_DataFormat:
            propertySize = sizeof(this->_format);
            if (dzDebugOK(AudioFileStreamGetProperty(this->_parser, pID, &(propertySize), &(this->_format)), "Fail to get audio file stream property: DataFormat.")) {
                if (this->_queue != NULL) {
                    // A queue already exists: the stream reported a second
                    // data format; log it rather than rebuild the queue.
                    dzDebug(!noErr, "Audio file stream duplicated data format.");
                } else {
                    if (dzDebugError(AudioQueueNewOutput(&(this->_format), QueueCallback, this, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &(this->_queue)), "Create new output audio queue failed.")) {
                        this->_queue = NULL;
                    }
                }
            }
            break;
            
        // Extract magic cookie data.
        // NOTE(review): if this property is delivered more than once, the
        // previous _magicCookie allocation is overwritten without a free —
        // confirm whether the parser can emit it twice.
        case kAudioFileStreamProperty_MagicCookieData:
            if (noErr == AudioFileStreamGetPropertyInfo(this->_parser, pID, &(propertySize), NULL)) {
                this->_magicCookie = malloc(propertySize);
                this->_magicCookieSize = propertySize;
                // On fetch failure, release the buffer and reset state so no
                // stale cookie is ever applied to the queue.
                if (this->_magicCookie != NULL && dzDebugError(AudioFileStreamGetProperty(this->_parser, pID, &(propertySize), this->_magicCookie), "Fail to get audio file stream property: MagicCookieData.")) {
                    free(this->_magicCookie);
                    this->_magicCookie = NULL;
                    this->_magicCookieSize = 0;
                }
            }
            break;
            
        // Set magic cookie data if any. (Queue shall be already created.)
        case kAudioFileStreamProperty_ReadyToProducePackets:
            if (this->_queue != NULL && this->_magicCookie != NULL) {
                dzDebug(AudioQueueSetProperty(this->_queue, kAudioQueueProperty_MagicCookie, this->_magicCookie, this->_magicCookieSize), "Fail to set audio queue property: MagicCookie.");
            }
            if (this->_queue != NULL && this->_parser != NULL) {
                this->_status = DZAudioQueuePlayerStatus_ReadyToStart;
            }
            break;
        default:
            break;
    }
}
Example #12
0
		// Copies the file's magic cookie (if any) onto inQueue.
		// NOTE(review): both exit paths return noErr, so callers never see a
		// failure even when getting/setting the cookie errored (`result` is
		// checked by AssertNoError but never returned) — confirm whether
		// swallowing errors here is intentional.
		static OSStatus AttachNewCookie(AudioQueueRef inQueue, BackgroundTrackMgr::BG_FileInfo *inFileInfo)
		{
			OSStatus result = noErr;
			UInt32 size = sizeof(UInt32);
			result = AudioFileGetPropertyInfo (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, NULL);
			// Only transfer the cookie when the property exists and is non-empty.
			if (!result && size) 
			{
				char* cookie = new char [size];		
				result = AudioFileGetProperty (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, cookie);
					AssertNoError("Error getting cookie data", end);
				result = AudioQueueSetProperty(inQueue, kAudioQueueProperty_MagicCookie, cookie, size);
				delete [] cookie;
					AssertNoError("Error setting cookie data for queue", end);
			}
			return noErr;
		
		end:
			return noErr;
		}
Example #13
0
    // Opens the audio file at `url` and fully prepares aqData for playback:
    // reads the data format, creates the output queue, derives buffer sizing
    // from the file's upper-bound packet size, allocates packet descriptions
    // for VBR formats, and transfers the magic cookie if present.
    // Returns false only when the file cannot be opened; later errors are
    // reported via checkStatus but do not change the return value.
    bool load(CFURLRef url) {

        OSStatus status;
        memset(&aqData,0,sizeof(aqData));
        timeBase = 0;
        
        status = AudioFileOpenURL(url,kAudioFileReadPermission,0,&aqData.mAudioFile);
        checkStatus(status);
        if( status != noErr ) return false;
        
        UInt32 dataFormatSize = sizeof (aqData.mDataFormat);    // 1

        status = AudioFileGetProperty (                                  // 2
            aqData.mAudioFile,                                  // 3
            kAudioFilePropertyDataFormat,                       // 4
            &dataFormatSize,                                    // 5
            &aqData.mDataFormat                                 // 6
        );
        checkStatus(status);

        status = AudioQueueNewOutput (                                // 1
            &aqData.mDataFormat,                             // 2
            HandleOutputBuffer,                              // 3
            &aqData,                                         // 4
            CFRunLoopGetCurrent (),                          // 5
            kCFRunLoopCommonModes,                           // 6
            0,                                               // 7
            &aqData.mQueue                                   // 8
        );
        checkStatus(status);

        // Upper-bound packet size drives the buffer-size calculation below.
        UInt32 maxPacketSize;
        UInt32 propertySize = sizeof (maxPacketSize);
        status = AudioFileGetProperty (                               // 1
            aqData.mAudioFile,                               // 2
            kAudioFilePropertyPacketSizeUpperBound,          // 3
            &propertySize,                                   // 4
            &maxPacketSize                                   // 5
        );
        checkStatus(status);

        // Size each buffer for roughly half a second of audio.
        deriveBufferSize (                                   // 6
            aqData.mDataFormat,                              // 7
            maxPacketSize,                                   // 8
            0.5,                                             // 9
            &aqData.bufferByteSize,                          // 10
            &aqData.mNumPacketsToRead                        // 11
        );
        
        // VBR formats need per-packet descriptions when enqueuing.
        bool isFormatVBR = (                                       // 1
            aqData.mDataFormat.mBytesPerPacket == 0 ||
            aqData.mDataFormat.mFramesPerPacket == 0
        );

        if (isFormatVBR) {                                         // 2
            aqData.mPacketDescs =
              (AudioStreamPacketDescription*) malloc (
                aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription)
              );
        } else {                                                   // 3
            aqData.mPacketDescs = NULL;
        }

        // Transfer the magic cookie (codec config) to the queue, if the file
        // has one; its absence is not an error.
        UInt32 cookieSize = sizeof (UInt32);                   // 1
        OSStatus couldNotGetProperty =                             // 2
            AudioFileGetPropertyInfo (                         // 3
                aqData.mAudioFile,                             // 4
                kAudioFilePropertyMagicCookieData,             // 5
                &cookieSize,                                   // 6
                NULL                                           // 7
            );
    //    checkStatus(couldNotGetProperty);
        if (!couldNotGetProperty && cookieSize) {              // 8
            char* magicCookie =
                (char *) malloc (cookieSize);

            status = AudioFileGetProperty (                             // 9
                aqData.mAudioFile,                             // 10
                kAudioFilePropertyMagicCookieData,             // 11
                &cookieSize,                                   // 12
                magicCookie                                    // 13
            );
        checkStatus(status);

            status = AudioQueueSetProperty (                            // 14
                aqData.mQueue,                                 // 15
                kAudioQueueProperty_MagicCookie,               // 16
                magicCookie,                                   // 17
                cookieSize                                     // 18
            );
        checkStatus(status);

            free (magicCookie);                                // 19
        }

        return true;
    }
// Opens FileName for streamed playback: reads the data format, creates the
// output queue, derives buffer sizing from the max packet size, transfers
// the magic cookie if present, and primes the queue buffers via the buffer
// callback. Errors raised through XThrowIfError are caught below and turn
// into a false return.
bool AudioQueueStreamOut::Open(const char *FileName)
{
    // Reset any state left over from a previous file.
    delete [] mInfo.mPacketDescs;
    mInfo.mPacketDescs = NULL;
    m_totalFrames = 0;
    mInfo.m_SeekToPacket = -1;
 	try {
   
        CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)FileName, strlen(FileName), false);
        if (!sndFile) return false;
            
        OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &mInfo.mAudioFile);
        CFRelease (sndFile);
                                
        UInt32 size = sizeof(mInfo.mDataFormat);
        XThrowIfError(AudioFileGetProperty(mInfo.mAudioFile, 
                                    kAudioFilePropertyDataFormat, &size, &mInfo.mDataFormat), "couldn't get file's data format");
        
        printf ("File format: "); mInfo.mDataFormat.Print();

        XThrowIfError(AudioQueueNewOutput(&mInfo.mDataFormat, AudioQueueStreamOut::AQBufferCallback, this, 
                                    NULL, kCFRunLoopCommonModes, 0, &mInfo.mQueue), "AudioQueueNew failed");

        UInt32 bufferByteSize;
        
        // we need to calculate how many packets we read at a time, and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (mInfo.mDataFormat.mBytesPerPacket == 0 || mInfo.mDataFormat.mFramesPerPacket == 0);
            
            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(mInfo.mAudioFile, 
                                    kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
            
            // adjust buffer size to represent about a half second of audio based on this format
            CalculateBytesForTime (mInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &mInfo.mNumPacketsToRead);
            
            if (isFormatVBR)
                mInfo.mPacketDescs = new AudioStreamPacketDescription [mInfo.mNumPacketsToRead];
            else
                mInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                
            printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)mInfo.mNumPacketsToRead);
        }

        // (2) If the file has a cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        result = AudioFileGetPropertyInfo (mInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char [size];		
            XThrowIfError (AudioFileGetProperty (mInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(mInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

            // prime the queue with some data before starting
        mInfo.mDone = false;
        mInfo.mCurrentPacket = 0;
        for (UInt32 i = 0; i < sizeof(mInfo.mBuffers)/sizeof(mInfo.mBuffers[0]); ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mInfo.mQueue, bufferByteSize, &mInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

            AQBufferCallback (this, mInfo.mQueue, mInfo.mBuffers[i]);
            
            // the callback may exhaust the file while priming; stop early
            if (mInfo.mDone) break;
        }	
        return IMUSIKStreamOutDefault::Create(NULL);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
    
    return false;
}
Example #15
0
/*! \brief  Creates an input AudioQueue for recording from in_device using
 *          the format in io_asbd. On success the queue's actual stream
 *          description is read back into io_asbd so the caller sees the
 *          format the hardware accepted.
 *
 *  \param in_device        Device to record from; its device_uid (when
 *                          non-NULL) is set as the queue's current device.
 *  \param in_callback_info User data handed to darwin_recorder_callback.
 *  \param io_asbd          In: desired format. Out: format in effect.
 *  \param out_audio_queue  Receives the created queue.
 *
 *  \return noErr on success, otherwise the first error encountered.
 */
OSStatus
darwin_configure_input_audio_queue (
                                 cahal_device*                 in_device,
                                 cahal_recorder_info*          in_callback_info,
                                 AudioStreamBasicDescription*  io_asbd,
                                 AudioQueueRef*                out_audio_queue
                                 )
{
  OSStatus result = noErr;
  
  if( NULL != io_asbd )
  {
    result =
    AudioQueueNewInput  (
                         io_asbd,
                         darwin_recorder_callback,
                         in_callback_info,
                         NULL,
                         kCFRunLoopCommonModes,
                         0,
                         out_audio_queue
                         );
    
    if( noErr == result )
    {
      if( NULL != in_device->device_uid )
      {
        CFStringRef device_uid =
        CFStringCreateWithCString (
                                   NULL,
                                   in_device->device_uid,
                                   kCFStringEncodingASCII
                                   );
        
        CPC_LOG (
                 CPC_LOG_LEVEL_TRACE,
                 "Setting queue device to %s.",
                 in_device->device_uid
                 );
        
        result =
        AudioQueueSetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_CurrentDevice,
                               &device_uid,
                               sizeof( device_uid )
                               );
        
        if( NULL != device_uid )
        {
          CFRelease( device_uid );
        }
      }
      
      if( result )
      {
        CPC_ERROR (
                   "Error setting current device (0x%x) to %s: 0x%x",
                   kAudioQueueProperty_CurrentDevice,
                   in_device->device_uid,
                   result
                   );
        
        CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
      }
      else
      {
        UINT32 property_size = sizeof( AudioStreamBasicDescription );
        
        /* Read back the format the queue actually uses. */
        result =
        AudioQueueGetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_StreamDescription,
                               io_asbd,
                               &property_size
                               );
        
        if( result )
        {
          /* FIX: the log previously cited
           * kAudioConverterCurrentInputStreamDescription (an AudioConverter
           * constant) although the property queried above is
           * kAudioQueueProperty_StreamDescription.
           */
          CPC_ERROR(
                    "Error accessing property 0x%x on AudioQueue: %d",
                    kAudioQueueProperty_StreamDescription,
                    result
                    );
          
          CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
        }
      }
    }
    else
    {
      CPC_ERROR (
                 "Error creating AudioQueue: 0x%x.",
                 result
                 );
      
      CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
    }
  }
  else
  {
    CPC_LOG_STRING  (
                     CPC_LOG_LEVEL_ERROR,
                     "Invalid basic stream description"
                     );
  }
  
  return( result );
}
Example #16
0
/*! \brief  Creates an output AudioQueue for playback on in_device using the
 *          format in in_asbd, sets the queue's current device (when the
 *          device has a UID) and applies in_volume.
 *
 *  \param in_device        Playback device; its device_uid (when non-NULL)
 *                          is set as the queue's current device.
 *  \param in_callback_info User data handed to darwin_playback_callback.
 *  \param in_volume        Volume applied via kAudioQueueParam_Volume.
 *  \param in_asbd          Desired stream format; must be non-NULL.
 *  \param out_audio_queue  Receives the created queue.
 *
 *  \return noErr on success, otherwise the first error encountered.
 */
OSStatus
darwin_configure_output_audio_queue (
                                cahal_device*                  in_device,
                                cahal_playback_info*           in_callback_info,
                                FLOAT32                        in_volume,
                                AudioStreamBasicDescription*   in_asbd,
                                AudioQueueRef*                 out_audio_queue
                                  )
{
  OSStatus result = noErr;
  
  if( NULL != in_asbd )
  {
    result =
    AudioQueueNewOutput  (
                         in_asbd,
                         darwin_playback_callback,
                         in_callback_info,
                         NULL,
                         kCFRunLoopCommonModes,
                         0,
                         out_audio_queue
                         );
    
    if( noErr == result )
    {
      if( NULL != in_device->device_uid )
      {
        CFStringRef device_uid =
        CFStringCreateWithCString (
                                   NULL,
                                   in_device->device_uid,
                                   kCFStringEncodingASCII
                                   );
        
        CPC_LOG (
                 CPC_LOG_LEVEL_TRACE,
                 "Setting queue device to %s.",
                 in_device->device_uid
                 );
        
        result =
        AudioQueueSetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_CurrentDevice,
                               &device_uid,
                               sizeof( device_uid )
                               );
        
        /* Volume is only applied if the device was set successfully. */
        if( noErr == result )
        {
          result =
          AudioQueueSetParameter  (
                                   *out_audio_queue,
                                   kAudioQueueParam_Volume,
                                   in_volume
                                   );
        }
        
        
        if( NULL != device_uid )
        {
          CFRelease( device_uid );
        }
      }
    
      if( result )
      {
        CPC_ERROR (
                   "Error setting current device (0x%x) to %s: 0x%x",
                   kAudioQueueProperty_CurrentDevice,
                   in_device->device_uid,
                   result
                   );
        
        CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
      }
    }
    else
    {
      CPC_ERROR (
                 "Error creating AudioQueue: 0x%x.",
                 result
                 );
      
      CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
    }
  }
  else
  {
    CPC_LOG_STRING  (
                     CPC_LOG_LEVEL_ERROR,
                     "Invalid basic stream description"
                     );
  }
  
  return( result );
}
Example #17
0
/* Start the capture-side (read) AudioQueue for this filter, unless it is
 * already running.  Builds the linear-PCM stream description from the
 * filter state, creates the input queue, optionally binds it to a specific
 * capture device (d->uidname), then starts the queue and marks the read
 * side as started. */
static void aq_start_r(MSFilter * f)
{
	AQData *d = (AQData *) f->data;
	OSStatus status;
	AudioStreamBasicDescription *fmt;

	if (d->read_started != FALSE)
		return;

	/* PCM layout of the data this filter produces: mono, packed,
	 * signed-integer samples at the configured rate and bit depth. */
	fmt = &d->readAudioFormat;
	fmt->mSampleRate = d->rate;
	fmt->mFormatID = kAudioFormatLinearPCM;
	fmt->mFormatFlags =
		kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	fmt->mFramesPerPacket = 1;
	fmt->mChannelsPerFrame = 1;
	fmt->mBitsPerChannel = d->bits;
	fmt->mBytesPerPacket = d->bits / 8;
	fmt->mBytesPerFrame = d->bits / 8;

	//show_format("input device", &d->devicereadFormat);
	//show_format("data from input filter", &d->readAudioFormat);

	/* The device-side format is kept identical to the filter format. */
	memcpy(&d->devicereadFormat, &d->readAudioFormat,
		   sizeof(d->readAudioFormat));
	d->readBufferByteSize =
		kSecondsPerBuffer * d->devicereadFormat.mSampleRate *
		(d->devicereadFormat.mBitsPerChannel / 8) *
		d->devicereadFormat.mChannelsPerFrame;

#if 0
	status = AudioConverterNew(&d->devicereadFormat,
							   &d->readAudioFormat,
							   &d->readAudioConverter);
	if (status != noErr) {
		ms_error("d->readAudioConverter = %d", status);
		d->readAudioConverter = NULL;
	}
#endif

	status = AudioQueueNewInput(&d->devicereadFormat, readCallback, d,	// userData
								NULL,	// run loop
								NULL,	// run loop mode
								0,	// flags
								&d->readQueue);
	if (status != noErr) {
		ms_error("AudioQueueNewInput = %d", status);
	}

	/* If the user selected a particular capture device, bind the queue
	 * to it via its unique identifier. */
	if (d->uidname != NULL) {
		char uidname[256];
		CFStringGetCString(d->uidname, uidname, 256,
						   CFStringGetSystemEncoding());
		ms_message("AQ: using uidname:%s", uidname);
		status = AudioQueueSetProperty(d->readQueue,
									   kAudioQueueProperty_CurrentDevice,
									   &d->uidname, sizeof(CFStringRef));
		if (status != noErr) {
			ms_error
				("AudioQueueSetProperty on kAudioQueueProperty_CurrentDevice %d",
				 status);
		}
	}

	setupRead(f);
	status = AudioQueueStart(d->readQueue, NULL);	// start time. NULL means ASAP.
	if (status != noErr) {
		ms_error("AudioQueueStart -read- %d", status);
	}
	d->read_started = TRUE;
}
/**
 * Prepare the audio input queue for recording.
 *
 * Creates an AudioQueue input using the class-wide sAudioFormat, optionally
 * routes it to the named input device (resolved name -> AudioDeviceID ->
 * device UID), enables input level metering, then allocates and enqueues
 * the capture buffers.
 *
 * @param waveInDeviceName  input device name; an empty string or "Default"
 *                          keeps the system default input.
 * @return false if recording is already in progress or if queue/buffer
 *         setup fails (the last error message is recorded via
 *         setLastErrorMessage and close() is called to release the queue).
 */
bool UBAudioQueueRecorder::init(const QString& waveInDeviceName)
{
    if(mIsRecording)
    {
        setLastErrorMessage("Already recording ...");
        return false;
    }

    // No run loop / mode is given (0, 0), so the input callback runs on one
    // of the audio queue's internal threads.
    OSStatus err = AudioQueueNewInput (&sAudioFormat, UBAudioQueueRecorder::audioQueueInputCallback,
                    this, 0, 0, 0, &mQueue);

    if (err)
    {
        setLastErrorMessage(QString("Cannot acquire audio input %1").arg(err));
        mQueue = 0;
        close();
        return false;
    }

    //qDebug() << "init with waveInDeviceName ..." << waveInDeviceName;

    // Route the queue to a specific capture device when one was requested.
    // Failures here are recorded but do not abort: recording proceeds on
    // the default input.
    if (waveInDeviceName.length() > 0 && waveInDeviceName != "Default")
    {
        AudioDeviceID deviceID = deviceIDFromDeviceName(waveInDeviceName);

        if (deviceID)
        {
            QString deviceUID = deviceUIDFromDeviceID(deviceID);
            if (deviceUID.length() > 0)
            {
                CFStringRef sr = CFStringCreateWithCString(0, deviceUID.toUtf8().constData(), kCFStringEncodingUTF8);

                err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_CurrentDevice, &sr, sizeof(CFStringRef));
                if (err)
                {
                    setLastErrorMessage(QString("Cannot set audio input %1 (%2)").arg(waveInDeviceName).arg(err));
                }
                else
                {
                    qDebug() << "recording with input" << waveInDeviceName;
                }
            }
            else
            {
                setLastErrorMessage(QString("Cannot find audio input device UID with ID %1 (%2)").arg(deviceID).arg(err));
            }
        }
        else
        {
            setLastErrorMessage(QString("Cannot find audio input with name %1 (%2)").arg(waveInDeviceName).arg(err));
        }
    }

    // Enable level metering so callers can poll the current input level.
    UInt32 monitor = true;

    err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_EnableLevelMetering , &monitor, sizeof(UInt32));
    if (err)
    {
        qWarning() << QString("Cannot set recording level monitoring %1").arg(err);
    }

    // Buffer sizing assumes 16-bit samples (the "* 2" factor); the comment
    // below documents the intended 44.1 kHz stereo case.
    int nbBuffers = 6;
    mSampleBufferSize = sAudioFormat.mSampleRate *  sAudioFormat.mChannelsPerFrame
                * 2 * mBufferLengthInMs / 1000; // 44.1 Khz * stereo * 16bit * buffer length

    // Allocate the capture buffers ...
    for (int i = 0; i < nbBuffers; i++)
    {
        AudioQueueBufferRef outBuffer;
        err = AudioQueueAllocateBuffer(mQueue, mSampleBufferSize, &outBuffer);

        if (err)
        {
            setLastErrorMessage(QString("Cannot allocate audio buffer %1").arg(err));
            close();
            return false;
        }

        mBuffers << outBuffer;
    }

    // ... and hand them all to the queue so capture can begin filling them.
    foreach(AudioQueueBufferRef buffer, mBuffers)
    {
        err = AudioQueueEnqueueBuffer(mQueue, buffer, 0, 0);
        if (err)
        {
            setLastErrorMessage(QString("Cannot enqueue audio buffer %1").arg(err));
            close();
            return false;
        }
    }
    void AudioOutputDeviceCoreAudio::CreateAndStartAudioQueue() throw(Exception) {
        OSStatus res = AudioQueueNewOutput (
            &aqPlayerState.mDataFormat,
            HandleOutputBuffer,
            &aqPlayerState,
            CFRunLoopGetCurrent(),
            kCFRunLoopCommonModes,
            0,
            &aqPlayerState.mQueue
        );

        if(res) {
            String s = String("AudioQueueNewOutput: Error ") + ToString(res);
            throw Exception(s);
        }

        CFStringRef devUID = CFStringCreateWithCString (
            NULL, CurrentDevice.GetUID().c_str(), kCFStringEncodingASCII
        );
        res = AudioQueueSetProperty (
            aqPlayerState.mQueue,
            kAudioQueueProperty_CurrentDevice,
            &devUID, sizeof(CFStringRef)
        );
        CFRelease(devUID);

        if(res) {
            String s = String("Failed to set audio device: ") + ToString(res);
            throw Exception(s);
        }

        for (int i = 0; i < uiBufferNumber; ++i) {
            res = AudioQueueAllocateBuffer (
                aqPlayerState.mQueue,
                aqPlayerState.bufferByteSize,
                &aqPlayerState.mBuffers[i]
            );

            if(res) {
                String s = String("AudioQueueAllocateBuffer: Error ");
                throw Exception(s + ToString(res));
            }
        }

        res = AudioQueueAddPropertyListener (
            aqPlayerState.mQueue,
            kAudioQueueProperty_CurrentDevice,
            AudioQueueListener,
            NULL
        );
        if(res) std::cerr << "Failed to register device change listener: " << res << std::endl;

        res = AudioQueueAddPropertyListener (
            aqPlayerState.mQueue,
            kAudioQueueProperty_IsRunning,
            AudioQueueListener,
            NULL
        );
        if(res) std::cerr << "Failed to register running listener: " << res << std::endl;

        Float32 gain = 1.0;

        res = AudioQueueSetParameter (
            aqPlayerState.mQueue,
            kAudioQueueParam_Volume,
            gain
        );

        if(res) std::cerr << "AudioQueueSetParameter: Error " << res << std::endl;

        atomic_set(&(aqPlayerState.mIsRunning), 1);
        FillBuffers();
        PrimeAudioQueue();

        res = AudioQueueStart(aqPlayerState.mQueue, NULL);
        if(res) {
            String s = String("AudioQueueStart: Error ") + ToString(res);
            throw Exception(s);
        }
    }
示例#20
0
/* Set up the playback-side (write) AudioQueue for this filter, unless it
 * has already been configured.  Creates the output queue, applies the
 * output gain, optionally binds the queue to a specific output device
 * (d->uidname) and prepares the write state.  Note: the queue is not
 * started here and write_started is not set by this function. */
static void aq_start_w(MSFilter * f)
{
	AQData *d = (AQData *) f->data;
	OSStatus status;
	AudioStreamBasicDescription *fmt;

	if (d->write_started != FALSE)
		return;

	/* PCM layout of the data handed to this output filter: mono, packed,
	 * signed-integer samples at the configured rate and bit depth. */
	fmt = &d->writeAudioFormat;
	fmt->mSampleRate = d->rate;
	fmt->mFormatID = kAudioFormatLinearPCM;
	fmt->mFormatFlags =
		kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	fmt->mFramesPerPacket = 1;
	fmt->mChannelsPerFrame = 1;
	fmt->mBitsPerChannel = d->bits;
	fmt->mBytesPerPacket = d->bits / 8;
	fmt->mBytesPerFrame = d->bits / 8;

	show_format("data provided to output filter",	&d->writeAudioFormat);
	show_format("output device", &d->devicewriteFormat);

	/* Device-side format mirrors the filter format. */
	memcpy(&d->devicewriteFormat, &d->writeAudioFormat,
		   sizeof(d->writeAudioFormat));
	d->writeBufferByteSize =
		kSecondsPerBuffer * d->devicewriteFormat.mSampleRate *
		(d->devicewriteFormat.mBitsPerChannel / 8) *
		d->devicewriteFormat.mChannelsPerFrame;

#if 0
	status = AudioConverterNew(&d->writeAudioFormat,
							   &d->devicewriteFormat,
							   &d->writeAudioConverter);
	if (status != noErr) {
		ms_error("d->writeAudioConverter = %d", status);
		d->writeAudioConverter = NULL;
	}
#endif

	/* Create the playback audio queue object.  NULL run loop means the
	 * callback runs on one of the queue's internal threads. */
	status = AudioQueueNewOutput(&d->devicewriteFormat, writeCallback, d,
								 NULL,	/*CFRunLoopGetCurrent () */
								 NULL,	/*kCFRunLoopCommonModes */
								 0,	// run loop flags
								 &d->writeQueue);
	if (status != noErr) {
		ms_error("AudioQueueNewOutput = %d", status);
	}

	AudioQueueSetParameter(d->writeQueue,
						   kAudioQueueParam_Volume,
						   gain_volume_out);

	/* If the user selected a particular output device, bind the queue
	 * to it via its unique identifier. */
	if (d->uidname != NULL) {
		char uidname[256];
		CFStringGetCString(d->uidname, uidname, 256,
						   CFStringGetSystemEncoding());
		ms_message("AQ: using uidname:%s", uidname);
		status = AudioQueueSetProperty(d->writeQueue,
									   kAudioQueueProperty_CurrentDevice,
									   &d->uidname, sizeof(CFStringRef));
		if (status != noErr) {
			ms_error
				("AudioQueueSetProperty on kAudioQueueProperty_CurrentDevice %d",
				 status);
		}
	}

	setupWrite(f);
	d->curWriteBuffer = 0;
}
示例#21
0
/*
 * Command-line audio file player (Core Audio AudioQueue sample).
 *
 * Usage: <tool> [-v vol] [-t seconds] [-r rate] [-q quality] [-d] file
 *   -v / --volume    playback volume (Float32, default 1)
 *   -t / --time      stop after this many seconds (default: play to end)
 *   -r / --rate      rate-scaled playback (non-iPhone targets only)
 *   -q / --rQuality  time-pitch quality: 0 = TimeDomain, else Spectral
 *   -d / --debug     print format and progress information
 *
 * Opens the file, picks a playable format (consulting the system decoder
 * list for layered formats), creates an output AudioQueue on the current
 * run loop, primes its buffers, then pumps the run loop until playback
 * finishes or the requested duration elapses.
 */
int main (int argc, const char * argv[]) 
{
#if TARGET_OS_WIN32
	InitializeQTML(0L);
#endif
	const char *fpath = NULL;
	Float32 volume = 1;
	Float32 duration = -1;
	Float32 currentTime = 0.0;
	Float32 rate = 0;
	int rQuality = 0;
	
	bool doPrint = false;
	// Parse the command line; exactly one non-option argument (the file
	// path) is accepted.
	for (int i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		if (arg[0] != '-') {
			if (fpath != NULL) {
				fprintf(stderr, "may only specify one file to play\n");
				usage();
			}
			fpath = arg;
		} else {
			arg += 1;
			if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];
				sscanf(arg, "%f", &volume);
			} else if (arg[0] == 't' || !strcmp(arg, "-time")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &duration);
			} else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &rate);
			} else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%d", &rQuality);
			} else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
				usage();
			} else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
				doPrint = true;
			} else {
				// arg was advanced past the leading '-', so back up to
				// print the argument as the user typed it.
				fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
				usage();
			}
		}
	}

	if (fpath == NULL)
		usage();
	
	if (doPrint)
		printf ("Playing file: %s\n", fpath);
	
	try {
		AQTestInfo myInfo;
		
		CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
		// quirky but harmless: the macro only throws when sndFile is NULL
		if (!sndFile) XThrowIfError (!sndFile, "can't parse file path");
			
		OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
		CFRelease (sndFile);
						
		XThrowIfError(result, "AudioFileOpen failed");
		
		// A file may expose several equivalent formats (e.g. AAC layered
		// variants); fetch the whole list and choose one we can decode.
		UInt32 size;
		XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
		UInt32 numFormats = size / sizeof(AudioFormatListItem);
		AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
		numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
		if (numFormats == 1) {
				// this is the common case
			myInfo.mDataFormat = formatList[0].mASBD;
			
				// see if there is a channel layout (multichannel file)
			result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
			if (result == noErr && myInfo.mChannelLayoutSize > 0) {
				myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
				XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
			}
		} else {
			if (doPrint) {
				printf ("File has a %d layered data format:\n", (int)numFormats);
				for (unsigned int i = 0; i < numFormats; ++i)
					CAStreamBasicDescription(formatList[i].mASBD).Print();
			}
			// now we should look to see which decoders we have on the system
			XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
			UInt32 numDecoders = size / sizeof(OSType);
			OSType *decoderIDs = new OSType [ numDecoders ];
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");			
			unsigned int i = 0;
			// pick the first listed format for which a system decoder exists
			for (; i < numFormats; ++i) {
				OSType decoderID = formatList[i].mASBD.mFormatID;
				bool found = false;
				for (unsigned int j = 0; j < numDecoders; ++j) {
					if (decoderID == decoderIDs[j]) {
						found = true;
						break;
					}
				}
				if (found) break;
			}
			delete [] decoderIDs;
			
			if (i >= numFormats) {
				fprintf (stderr, "Cannot play any of the formats in this file\n");
				throw kAudioFileUnsupportedDataFormatError;
			}
			myInfo.mDataFormat = formatList[i].mASBD;
			myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
			myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
			myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
			myInfo.mChannelLayout->mChannelBitmap = 0;
			myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
		}
		// NOTE(review): formatList leaks if any XThrowIfError above throws;
		// acceptable for a demo tool since the process exits shortly after.
		delete [] formatList;
		
		if (doPrint) {
			printf ("Playing format: "); 
			myInfo.mDataFormat.Print();
		}
		
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, 
									CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed");

		UInt32 bufferByteSize;		
		// we need to calculate how many packets we read at a time, and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a half second of audio based on this format
			CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR)
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			else
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
				
			if (doPrint)
				printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// (2) If the file has a cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// set ACL if there is one
		if (myInfo.mChannelLayout)
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

		// prime the queue with some data before starting
		myInfo.mDone = false;
		myInfo.mCurrentPacket = 0;
		for (int i = 0; i < kNumberBuffers; ++i) {
			XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

			AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);
			
			if (myInfo.mDone) break;
		}	
			// set the volume of the queue
		XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");
		
		XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");
		
#if !TARGET_OS_IPHONE
		if (rate > 0) {
			UInt32 propValue = 1;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");
			
			propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");
			
			propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
			if (rate != 1) {
				XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
			}
			
			if (doPrint) {
				printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
			}
		}
#endif
			// lets start playing now - stop is called in the AQTestBufferCallback when there's
			// no more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		// Pump the run loop in quarter-second slices so the -t duration
		// option can be honored between callbacks.
		do {
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
			currentTime += .25;
			if (duration > 0 && currentTime >= duration)
				break;
			
		} while (gIsRunning);
			
		// one extra second lets queued audio drain before disposing
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		// NOTE(review): error message says "AudioQueueDispose(false)" but
		// the call is AudioFileClose — message is misleading on failure.
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed");
	}
	// NOTE(review): catches CAXException by value; catch (const CAXException&)
	// would avoid a copy.
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
	catch (...) {
		fprintf(stderr, "Unspecified exception\n");
	}
	
    return 0;
}
示例#22
0
文件: player.c 项目: Yoonster/dhun
void playFile(const char* filePath) {

  CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL,
                                                                  (UInt8*) filePath,
                                                                  strlen (filePath),
                                                                  false);


  OSStatus result = AudioFileOpenURL(audioFileURL,
                                     fsRdPerm,
                                     0,
                                     &aqData.mAudioFile);

  CFRelease (audioFileURL);

  UInt32 dataFormatSize = sizeof (aqData.mDataFormat);

  AudioFileGetProperty(aqData.mAudioFile,
                       kAudioFilePropertyDataFormat,
                       &dataFormatSize,
                       &aqData.mDataFormat);

  AudioQueueNewOutput(&aqData.mDataFormat,
                      HandleOutputBuffer,
                      &aqData,
                      CFRunLoopGetCurrent(),
                      kCFRunLoopCommonModes,
                      0,
                      &aqData.mQueue);

  UInt32 maxPacketSize;
  UInt32 propertySize = sizeof (maxPacketSize);
  AudioFileGetProperty(aqData.mAudioFile,
                       kAudioFilePropertyPacketSizeUpperBound,
                       &propertySize,
                       &maxPacketSize);


  DeriveBufferSize(&aqData.mDataFormat,
                   maxPacketSize,
                   0.5,
                   &aqData.bufferByteSize,
                   &aqData.mNumPacketsToRead);

  bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 ||
                      aqData.mDataFormat.mFramesPerPacket == 0);

  if (isFormatVBR) {
    // LOG("%s\n","VBR");
    aqData.mPacketDescs =
      (AudioStreamPacketDescription*)
      malloc (aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription));
  } else {
    aqData.mPacketDescs = NULL;
  }

  UInt32 cookieSize = sizeof (UInt32);
  bool couldNotGetProperty =
    AudioFileGetPropertyInfo (aqData.mAudioFile,
                              kAudioFilePropertyMagicCookieData,
                              &cookieSize,
                              NULL);

  if (!couldNotGetProperty && cookieSize) {
    char* magicCookie = (char *) malloc (cookieSize);

    AudioFileGetProperty (aqData.mAudioFile,
                          kAudioFilePropertyMagicCookieData,
                          &cookieSize,
                          magicCookie);

    AudioQueueSetProperty (aqData.mQueue,
                           kAudioQueueProperty_MagicCookie,
                           magicCookie,
                           cookieSize);

    free (magicCookie);
  }

  aqData.mCurrentPacket = 0;
  aqData.mIsRunning = true;

  //LOG("%d\n", aqData.mNumPacketsToRead);
  for (int i = 0; i < kNumberBuffers; ++i) {
    AudioQueueAllocateBuffer (aqData.mQueue,
                              aqData.bufferByteSize,
                              &aqData.mBuffers[i]);

    HandleOutputBuffer (&aqData,
                        aqData.mQueue,
                        aqData.mBuffers[i]);
  }

  Float32 gain = 1.0;
  // Optionally, allow user to override gain setting here
  AudioQueueSetParameter (aqData.mQueue,
                          kAudioQueueParam_Volume,
                          gain);


  //LOG("%s\n","Starting play");


  // IMPORTANT NOTE : This value must be set
  // Before the call to HandleOutputBuffer
  //a qData.mIsRunning = true;

  AudioQueueStart (aqData.mQueue,
                   NULL);

}
/*
 * Decode sourceURL offline (faster than real time) through an AudioQueue
 * in offline-render mode and write the rendered PCM to destinationURL as
 * a 16-bit signed-integer CAF file.  Uses AudioQueueSetOfflineRenderFormat
 * plus repeated AudioQueueOfflineRender calls instead of real-time output.
 */
void DoAQOfflineRender(CFURLRef sourceURL, CFURLRef destinationURL) 
{
    // main audio queue code
	try {
		AQTestInfo myInfo;
        
		myInfo.mDone = false;
		myInfo.mFlushed = false;
		myInfo.mCurrentPacket = 0;
		
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile), "AudioFileOpen failed");
			
		UInt32 size = sizeof(myInfo.mDataFormat);
		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat), "couldn't get file's data format");
		
		printf ("File format: "); myInfo.mDataFormat.Print();

        // create a new audio queue output
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat,      // The data format of the audio to play. For linear PCM, only interleaved formats are supported.
                                          AQTestBufferCallback,     // A callback function to use with the playback audio queue.
                                          &myInfo,                  // A custom data structure for use with the callback function.
                                          CFRunLoopGetCurrent(),    // The event loop on which the callback function pointed to by the inCallbackProc parameter is to be called.
                                                                    // If you specify NULL, the callback is invoked on one of the audio queue’s internal threads.
                                          kCFRunLoopCommonModes,    // The run loop mode in which to invoke the callback function specified in the inCallbackProc parameter. 
                                          0,                        // Reserved for future use. Must be 0.
                                          &myInfo.mQueue),          // On output, the newly created playback audio queue object.
                                          "AudioQueueNew failed");

		UInt32 bufferByteSize;
		
		// we need to calculate how many packets we read at a time and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a second of audio based on this format
			CalculateBytesForTime(myInfo.mDataFormat, maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR) {
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			} else {
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }
				
			printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// if the file has a magic cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		OSStatus result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// channel layout?
		OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, NULL);
		AudioChannelLayout *acl = NULL;
		if (err == noErr && size > 0) {
			acl = (AudioChannelLayout *)malloc(size);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
		}

		//allocate the input read buffer
		XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffer), "AudioQueueAllocateBuffer");

		// prepare a canonical interleaved capture format
		CAStreamBasicDescription captureFormat;
		captureFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
		captureFormat.SetAUCanonical(myInfo.mDataFormat.mChannelsPerFrame, true); // interleaved
		XThrowIfError(AudioQueueSetOfflineRenderFormat(myInfo.mQueue, &captureFormat, acl), "set offline render format");			
		
		ExtAudioFileRef captureFile;
        
		// prepare a 16-bit int file format, sample channel count and sample rate
		CAStreamBasicDescription dstFormat;
		dstFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
		dstFormat.mChannelsPerFrame = myInfo.mDataFormat.mChannelsPerFrame;
		dstFormat.mFormatID = kAudioFormatLinearPCM;
		dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
		dstFormat.mBitsPerChannel = 16;
		dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
		dstFormat.mFramesPerPacket = 1;
		
		// create the capture file
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");
		// NOTE(review): captureFile (and acl) are not released if an
		// exception is thrown below this point — confirm whether that
		// matters for how this tool is used.
		
        // set the capture file's client format to be the canonical format from the queue
		XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");
		
		// allocate the capture buffer, just keep it at half the size of the enqueue buffer
        // we don't ever want to pull any faster than we can push data in for render
        // this 2:1 ratio keeps the AQ Offline Render happy
		const UInt32 captureBufferByteSize = bufferByteSize / 2;
		
        AudioQueueBufferRef captureBuffer;
		AudioBufferList captureABL;
		
		XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, captureBufferByteSize, &captureBuffer), "AudioQueueAllocateBuffer");
		
        captureABL.mNumberBuffers = 1;
		captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
		captureABL.mBuffers[0].mNumberChannels = captureFormat.mChannelsPerFrame;

		// lets start playing now - stop is called in the AQTestBufferCallback when there's
		// no more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		AudioTimeStamp ts;
		ts.mFlags = kAudioTimeStampSampleTimeValid;
		ts.mSampleTime = 0;

		// we need to call this once asking for 0 frames
		XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, 0), "AudioQueueOfflineRender");

		// we need to enqueue a buffer after the queue has started
		AQTestBufferCallback(&myInfo, myInfo.mQueue, myInfo.mBuffer);

		// render loop: pull decoded frames from the queue and append them
		// to the capture file until the callback reports the flush.
		while (true) {
			UInt32 reqFrames = captureBufferByteSize / captureFormat.mBytesPerFrame;
			
            XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, reqFrames), "AudioQueueOfflineRender");
			
            captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
			captureABL.mBuffers[0].mDataByteSize = captureBuffer->mAudioDataByteSize;
			UInt32 writeFrames = captureABL.mBuffers[0].mDataByteSize / captureFormat.mBytesPerFrame;
			
            // NOTE(review): %ld does not match UInt32 writeFrames on LP64 —
            // should be %u (or cast); harmless on common targets but worth fixing.
            printf("t = %.f: AudioQueueOfflineRender:  req %d fr/%d bytes, got %ld fr/%d bytes\n", ts.mSampleTime, (int)reqFrames, (int)captureBufferByteSize, writeFrames, (int)captureABL.mBuffers[0].mDataByteSize);
            
			XThrowIfError(ExtAudioFileWrite(captureFile, writeFrames, &captureABL), "ExtAudioFileWrite");
			
            if (myInfo.mFlushed) break;
			
			ts.mSampleTime += writeFrames;
		}

		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		// NOTE(review): error message says "AudioQueueDispose(false)" but the
		// call is AudioFileClose — misleading on failure.
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed");
		XThrowIfError(ExtAudioFileDispose(captureFile), "ExtAudioFileDispose failed");

		if (myInfo.mPacketDescs) delete [] myInfo.mPacketDescs;
		if (acl) free(acl);
	}
	// NOTE(review): catches CAXException by value; catch (const CAXException&)
	// would avoid a copy.
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
    
    return;
}
示例#24
0
void Audio_Queue::init()
{
    // Creates the output audio queue, allocates its buffers, and registers the
    // IsRunning listener. On any failure the queue is disposed, m_outAQ is
    // reset to 0 (so later calls can detect the uninitialized state),
    // m_lastError is recorded, and the delegate is notified.
    OSStatus err = noErr;
    
    cleanup();
        
    // create the audio queue
    err = AudioQueueNewOutput(&m_streamDesc, audioQueueOutputCallback, this, CFRunLoopGetCurrent(), NULL, 0, &m_outAQ);
    if (err) {
        AQ_TRACE("%s: error in AudioQueueNewOutput\n", __PRETTY_FUNCTION__);
        
        m_lastError = err;
        
        if (m_delegate) {
            m_delegate->audioQueueInitializationFailed();
        }
        
        return;
    }
    
    Stream_Configuration *configuration = Stream_Configuration::configuration();
    
    // allocate audio queue buffers
    for (unsigned int i = 0; i < configuration->bufferCount; ++i) {
        err = AudioQueueAllocateBuffer(m_outAQ, configuration->bufferSize, &m_audioQueueBuffer[i]);
        if (err) {
            /* If allocating the buffers failed, everything else will fail, too.
             *  Dispose the queue so that we can later on detect that this
             *  queue in fact has not been initialized.
             */
            
            AQ_TRACE("%s: error in AudioQueueAllocateBuffer\n", __PRETTY_FUNCTION__);
            
            (void)AudioQueueDispose(m_outAQ, true);
            m_outAQ = 0;
            
            m_lastError = err;
            
            if (m_delegate) {
                m_delegate->audioQueueInitializationFailed();
            }
            
            return;
        }
    }
    
    // listen for kAudioQueueProperty_IsRunning
    err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
    if (err) {
        AQ_TRACE("%s: error in AudioQueueAddPropertyListener\n", __PRETTY_FUNCTION__);
        
        /* Without the IsRunning listener the queue's running state cannot be
         * tracked. Dispose the queue and notify the delegate, consistent with
         * the other failure paths above; previously this path returned with
         * the queue still allocated and the delegate never informed.
         */
        (void)AudioQueueDispose(m_outAQ, true);
        m_outAQ = 0;
        
        m_lastError = err;
        
        if (m_delegate) {
            m_delegate->audioQueueInitializationFailed();
        }
        
        return;
    }
    
    if (configuration->enableTimeAndPitchConversion) {
        UInt32 enableTimePitchConversion = 1;
        
        // Best-effort: a failure here only disables play-rate changes,
        // it does not make the queue unusable.
        err = AudioQueueSetProperty (m_outAQ, kAudioQueueProperty_EnableTimePitch, &enableTimePitchConversion, sizeof(enableTimePitchConversion));
        if (err != noErr) {
            AQ_TRACE("Failed to enable time and pitch conversion. Play rate setting will fail\n");
        }
    }
    
    // Apply a non-default initial volume, if one was requested before init.
    if (m_initialOutputVolume != 1.0) {
        setVolume(m_initialOutputVolume);
    }
}
示例#25
0
void StreamPropertyListenerProc(void * inClientData,
                                AudioFileStreamID inAudioFileStream,
                                AudioFileStreamPropertyID inPropertyID,
                                UInt32 * ioFlags)
{
    // this is called by audio file stream when it finds property values
    struct audioPlayer* player = (struct audioPlayer*)inClientData;
    OSStatus err = noErr;

//    printf("found property '%c%c%c%c'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

    switch (inPropertyID) {
    case kAudioFileStreamProperty_ReadyToProducePackets :
    {
        // the file stream parser is now ready to produce audio packets.
        // get the stream format.
        AudioStreamBasicDescription asbd;
        UInt32 asbdSize = sizeof(asbd);
        err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &asbd);
        if (err) {
            PRINTERROR("get kAudioFileStreamProperty_DataFormat");
            player->failed = true;
            break;
        }

        //TODO: Is this really right!?!
        player->songDuration = player->waith.contentLength * 2000 / asbd.mSampleRate;
        player->samplerate = asbd.mSampleRate;

        player->packetDuration = asbd.mFramesPerPacket / asbd.mSampleRate;

        // create the audio queue
        err = AudioQueueNewOutput(&asbd, PianobarAudioQueueOutputCallback, player, NULL, NULL, 0, &player->audioQueue);
        if (err) {
            PRINTERROR("AudioQueueNewOutput");
            player->failed = true;
            break;
        }

        // allocate audio queue buffers
        for (unsigned int i = 0; i < kNumAQBufs; ++i) {
            err = AudioQueueAllocateBuffer(player->audioQueue, kAQBufSize, &player->audioQueueBuffer[i]);
            if (err) {
                PRINTERROR("AudioQueueAllocateBuffer");
                player->failed = true;
                break;
            }
        }


        // get the cookie size
        UInt32 cookieSize;
        Boolean writable;
        err = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, &writable);
        if (err) {
            PRINTERROR("info kAudioFileStreamProperty_MagicCookieData");
            break;
        }

        // get the cookie data
        void* cookieData = calloc(1, cookieSize);
        err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, cookieData);
        if (err) {
            PRINTERROR("get kAudioFileStreamProperty_MagicCookieData");
            free(cookieData);
            break;
        }

        // set the cookie on the queue.
        err = AudioQueueSetProperty(player->audioQueue, kAudioQueueProperty_MagicCookie, cookieData, cookieSize);
        free(cookieData);
        if (err) {
            PRINTERROR("set kAudioQueueProperty_MagicCookie");
            break;
        }

        // listen for kAudioQueueProperty_IsRunning
        err = AudioQueueAddPropertyListener(player->audioQueue, kAudioQueueProperty_IsRunning, AudioQueueIsRunningCallback, player);
        if (err) {
            PRINTERROR("AudioQueueAddPropertyListener");
            player->failed = true;
            break;
        }

        break;
    }
    }
}