Example #1
// _______________________________________________________________________________________
//
// call for existing file, NOT new one - from Open() or Wrap()
void	CAAudioFile::GetExistingFileInfo()
{
	LOG_FUNCTION("CAAudioFile::GetExistingFileInfo", "%p", this);
	UInt32 propertySize;
	OSStatus err;

	// get mFileDataFormat
	propertySize = sizeof(AudioStreamBasicDescription);
	XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyDataFormat, &propertySize, &mFileDataFormat), "get audio file's data format");

	// get mFileChannelLayout
	err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, &propertySize, NULL);
	if (err == noErr && propertySize > 0) {
		AudioChannelLayout *layout = static_cast<AudioChannelLayout *>(malloc(propertySize));
		err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyChannelLayout, &propertySize, layout);
		if (err == noErr) {
			mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
			printf("existing file's channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
		}
		free(layout);
		XThrowIfError(err, "get audio file's channel layout");
	}
	if (mMode != kReading)
		return;

#if 0
	// get mNumberPackets
	propertySize = sizeof(mNumberPackets);
	XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyAudioDataPacketCount, &propertySize, &mNumberPackets), "get audio file's packet count");
#if VERBOSE_IO
	printf("CAAudioFile::GetExistingFileInfo: %qd packets\n", mNumberPackets);
#endif
#endif

	// get mMagicCookie
	err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
	if (err == noErr && propertySize > 0) {
		mMagicCookie = new Byte[propertySize];
		mMagicCookieSize = propertySize;
		XThrowIfError(AudioFileGetProperty(mAudioFile, kAudioFilePropertyMagicCookieData, &propertySize, mMagicCookie), "get audio file's magic cookie");
	}
	InitFileMaxPacketSize();
	mPacketMark = 0;
	mFrameMark = 0;

	UpdateClientMaxPacketSize();
}
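Every example in this collection leans on the same two-step idiom: call AudioFileGetPropertyInfo to learn a variable-size property's size, allocate a buffer, then call AudioFileGetProperty to fill it. A minimal standalone sketch of that idiom (the helper name CopyAudioFileProperty is ours, not from any of these sources):

// Query, allocate, and fetch a variable-size audio file property.
// Returns a malloc'd buffer the caller must free, or NULL if the
// property is absent or any call fails.
static void *CopyAudioFileProperty(AudioFileID file, AudioFilePropertyID prop, UInt32 *outSize)
{
	UInt32 size = 0;
	OSStatus err = AudioFileGetPropertyInfo(file, prop, &size, NULL);
	if (err != noErr || size == 0)
		return NULL;
	void *buffer = malloc(size);
	if (buffer == NULL)
		return NULL;
	err = AudioFileGetProperty(file, prop, &size, buffer);
	if (err != noErr) {
		free(buffer);
		return NULL;
	}
	if (outSize)
		*outSize = size;
	return buffer;
}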
Example #2
 music_obj<audio_queue_driver>::music_obj(const boost::shared_ptr<ifdstream>& ifd, bool loop, float gain,
                                          float start, float end)
 : packet_index_(0)
 , start_packet_index_(0)
 , stop_packet_index_(0)
 , volume_(gain)
 , loop_(loop)
 , is_paused_(false)
 , ifd_(ifd)
 {        
     LOG("Got ifdstream from path..");
     
     OSStatus res = AudioFileOpenWithCallbacks(this, &music_obj::af_read_cb, &music_obj::af_write_cb,
                         &music_obj::af_get_size_cb, &music_obj::af_set_size_cb, 
                             kAudioFileCAFType, &audio_file_);
     
     if(res)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file in liverpool fs. AudioFile returned " 
                                 + boost::lexical_cast<std::string>(res));
     }
     
     UInt32 size = sizeof(data_format_);
     AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);
     
     AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);        
     AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);
     
     if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
     {
         size = sizeof(max_packet_size_);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
         if (max_packet_size_ > BUFFER_SIZE_BYTES)
         {
             max_packet_size_ = BUFFER_SIZE_BYTES;
         }
         
         num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
         packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
     }
     else
     {
         num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
         packet_descriptions_ = NULL;
     }
     
     res = AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL); // check the result: on failure, size would keep its stale value
     if (res == noErr && size > 0)
     {
         char* cookie = (char*)malloc(sizeof(char) * size);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
         AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
         free(cookie);
     }
     
     calculate_seek(start, end);        
     volume(volume_);
     prime();   
 }
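The af_* callbacks registered above are not part of this excerpt. A sketch of what the read and size callbacks could look like, using the AudioFile_ReadProc and AudioFile_GetSizeProc signatures that AudioFileOpenWithCallbacks expects; the seekg/read/gcount/size methods on ifdstream are assumptions:

 OSStatus music_obj<audio_queue_driver>::af_read_cb(void* in_client_data, SInt64 in_position,
                                                    UInt32 request_count, void* buffer,
                                                    UInt32* actual_count)
 {
     // recover the owning object and serve the read from its stream
     music_obj* self = static_cast<music_obj*>(in_client_data);
     self->ifd_->seekg(in_position);                             // assumed ifdstream API
     self->ifd_->read(static_cast<char*>(buffer), request_count);
     *actual_count = static_cast<UInt32>(self->ifd_->gcount());  // assumed ifdstream API
     return noErr;
 }
 
 SInt64 music_obj<audio_queue_driver>::af_get_size_cb(void* in_client_data)
 {
     music_obj* self = static_cast<music_obj*>(in_client_data);
     return self->ifd_->size();                                  // assumed ifdstream API
 }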
Example #3
		OSStatus SetupQueue(BG_FileInfo *inFileInfo)
		{
			UInt32 size = 0;
			OSStatus result = AudioQueueNewOutput(&inFileInfo->mFileFormat, QueueCallback, this, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &mQueue);
					AssertNoError("Error creating queue", end);

			// (2) If the file has a cookie, we should get it and set it on the AQ
			size = sizeof(UInt32);
			result = AudioFileGetPropertyInfo (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, NULL);

			if (!result && size) {
				char* cookie = new char [size];		
				result = AudioFileGetProperty (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, cookie);
					AssertNoError("Error getting magic cookie", end);
				result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_MagicCookie, cookie, size);
				delete [] cookie;
					AssertNoError("Error setting magic cookie", end);
			}

			// channel layout
			OSStatus err;	// declared without an initializer so the goto in AssertNoError can legally jump past it
			err = AudioFileGetPropertyInfo(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, NULL);
			if (err == noErr && size > 0) {
				AudioChannelLayout *acl = (AudioChannelLayout *)malloc(size);
				result = AudioFileGetProperty(inFileInfo->mAFID, kAudioFilePropertyChannelLayout, &size, acl);
					AssertNoError("Error getting channel layout from file", end);
				result = AudioQueueSetProperty(mQueue, kAudioQueueProperty_ChannelLayout, acl, size);
				free(acl);
					AssertNoError("Error setting channel layout on queue", end);
			}
			
			// add a notification proc for when the queue stops
			result = AudioQueueAddPropertyListener(mQueue, kAudioQueueProperty_IsRunning, QueueStoppedProc, this);
				AssertNoError("Error adding isRunning property listener to queue", end);
				
			// we need to reset this variable so that if the queue is stopped mid buffer we don't dispose it 
			mMakeNewQueueWhenStopped = false;
			
			// volume
			result = SetVolume(mVolume);
			
		end:
			return result;
		}
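AssertNoError is not defined in this excerpt. Given how it is used here and in Example #8 (a message plus a label to jump to on failure), a plausible definition is:

// Assumed definition, consistent with the call sites: log the failed
// step and jump to the supplied label when `result` holds an error.
#define AssertNoError(inMessage, inHandler)                      \
	if (result != noErr) {                                       \
		fprintf(stderr, "%s: %d\n", inMessage, (int)result);     \
		goto inHandler;                                          \
	}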
Example #4
 music_obj<audio_queue_driver>::music_obj(const std::string& file_path, bool loop, float gain,
                                          float start, float end)
 : packet_index_(0)
 , start_packet_index_(0)
 , stop_packet_index_(0)
 , volume_(gain)
 , loop_(loop)
 , is_paused_(false)
 {
     CFURLRef file_url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)file_path.c_str(), file_path.size(), false);
     OSStatus res = AudioFileOpenURL(file_url, kAudioFileReadPermission, kAudioFileCAFType, &audio_file_);
     CFRelease(file_url);
     
     if(res)
     {
         throw sys_exception("audio_queue_driver: couldn't open audio file at '" + file_path + "'");
     }
     
     UInt32 size = sizeof(data_format_);
     AudioFileGetProperty(audio_file_, kAudioFilePropertyDataFormat, &size, &data_format_);
     
     AudioQueueNewOutput(&data_format_, &music_obj<audio_queue_driver>::buffer_cb, this, NULL, NULL, 0, &queue_);        
     AudioQueueAddPropertyListener(queue_, kAudioQueueProperty_IsRunning, &music_obj<audio_queue_driver>::playback_cb, this);
     
     if (data_format_.mBytesPerPacket == 0 || data_format_.mFramesPerPacket == 0)
     {
         size = sizeof(max_packet_size_);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyPacketSizeUpperBound, &size, &max_packet_size_);
         if (max_packet_size_ > BUFFER_SIZE_BYTES)
         {
             max_packet_size_ = BUFFER_SIZE_BYTES;
         }
         
         num_packets_to_read_ = BUFFER_SIZE_BYTES / max_packet_size_;
         packet_descriptions_ = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * num_packets_to_read_);
     }
     else
     {
         num_packets_to_read_ = BUFFER_SIZE_BYTES / data_format_.mBytesPerPacket;
         packet_descriptions_ = NULL;
     }
     
     res = AudioFileGetPropertyInfo(audio_file_, kAudioFilePropertyMagicCookieData, &size, NULL); // check the result: on failure, size would keep its stale value
     if (res == noErr && size > 0)
     {
         char* cookie = (char*)malloc(sizeof(char) * size);
         AudioFileGetProperty(audio_file_, kAudioFilePropertyMagicCookieData, &size, cookie);
         AudioQueueSetProperty(queue_, kAudioQueueProperty_MagicCookie, cookie, size);
         free(cookie);
     }
     
     calculate_seek(start, end);
     volume(volume_);
     prime();
 }
// many encoded formats require a 'magic cookie'. if the file has a cookie we get it
// and configure the queue with it
static void MyCopyEncoderCookieToQueue(AudioFileID theFile, AudioQueueRef queue ) {
	UInt32 propertySize;
	OSStatus result = AudioFileGetPropertyInfo (theFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
	if (result == noErr && propertySize > 0)
	{
		Byte* magicCookie = (UInt8*)malloc(sizeof(UInt8) * propertySize);	
		CheckError(AudioFileGetProperty (theFile, kAudioFilePropertyMagicCookieData, &propertySize, magicCookie), "get cookie from file failed");
		CheckError(AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, propertySize), "set cookie on queue failed");
		free(magicCookie);
	}
}
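CheckError is likewise assumed by the snippet above; a minimal definition consistent with its use (report the failed operation and bail) would be:

static void CheckError(OSStatus error, const char *operation)
{
	if (error == noErr)
		return;
	// a fuller version would also decode four-char error codes
	fprintf(stderr, "Error: %s (%d)\n", operation, (int)error);
	exit(1);
}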
Example #6
void	GetFormatFromInputFile (AudioFileID inputFile, CAStreamBasicDescription & inputFormat)
{
	bool doPrint = true;
	UInt32 size;
	XThrowIfError(AudioFileGetPropertyInfo(inputFile,
                                           kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
	UInt32 numFormats = size / sizeof(AudioFormatListItem);
	AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];
    
	XThrowIfError(AudioFileGetProperty(inputFile,
                                       kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
	numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
	if (numFormats == 1) {
        // this is the common case
		inputFormat = formatList[0].mASBD;
	} else {
		if (doPrint) {
			printf ("File has a %d layered data format:\n", (int)numFormats);
			for (unsigned int i = 0; i < numFormats; ++i)
				CAStreamBasicDescription(formatList[i].mASBD).Print();
			printf("\n");
		}
		// now we should look to see which decoders we have on the system
		XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
		UInt32 numDecoders = size / sizeof(OSType);
		OSType *decoderIDs = new OSType [ numDecoders ];
		XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");
		unsigned int i = 0;
		for (; i < numFormats; ++i) {
			OSType decoderID = formatList[i].mASBD.mFormatID;
			bool found = false;
			for (unsigned int j = 0; j < numDecoders; ++j) {
				if (decoderID == decoderIDs[j]) {
					found = true;
					break;
				}
			}
			if (found) break;
		}
		delete [] decoderIDs;
		
		if (i >= numFormats) {
			fprintf (stderr, "Cannot play any of the formats in this file\n");
			throw kAudioFileUnsupportedDataFormatError;
		}
		inputFormat = formatList[i].mASBD;
	}
	delete [] formatList;
}
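A hypothetical call site for the helper above, using the same XThrowIfError convention (the path is illustrative):

	AudioFileID inputFile;
	CFURLRef url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)"/tmp/input.m4a",
	                                                       strlen("/tmp/input.m4a"), false);
	XThrowIfError(AudioFileOpenURL(url, kAudioFileReadPermission, 0, &inputFile), "AudioFileOpenURL");
	CFRelease(url);

	CAStreamBasicDescription inputFormat;
	GetFormatFromInputFile(inputFile, inputFormat);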
// Sets the packet table containing information about the number of valid frames in a file and where they begin and end
// for the file types that support this information.
// Calling this function makes sure we write out the priming and remainder details to the destination file	
static void WritePacketTableInfo(AudioConverterRef converter, AudioFileID destinationFileID)
{
    UInt32 isWritable;
    UInt32 dataSize;
    OSStatus error = AudioFileGetPropertyInfo(destinationFileID, kAudioFilePropertyPacketTableInfo, &dataSize, &isWritable);
    if (noErr == error && isWritable) {

        AudioConverterPrimeInfo primeInfo;
        dataSize = sizeof(primeInfo);

        // retrieve the leadingFrames and trailingFrames information from the converter,
        error = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &dataSize, &primeInfo);
        if (noErr == error) {
            // we have some priming information to write out to the destination file
            /* The total number of packets in the file times the frames per packet (or counting each packet's
               frames individually for a variable frames per packet format) minus mPrimingFrames, minus
               mRemainderFrames, should equal mNumberValidFrames.
            */
            AudioFilePacketTableInfo pti;
            dataSize = sizeof(pti);
            error = AudioFileGetProperty(destinationFileID, kAudioFilePropertyPacketTableInfo, &dataSize, &pti);
            if (noErr == error) {
                // there's priming to write out to the file
                UInt64 totalFrames = pti.mNumberValidFrames + pti.mPrimingFrames + pti.mRemainderFrames; // get the total number of frames from the output file
                printf("Total number of frames from output file: %lld\n", totalFrames);
                
                pti.mPrimingFrames = primeInfo.leadingFrames;
                pti.mRemainderFrames = primeInfo.trailingFrames;
                pti.mNumberValidFrames = totalFrames - pti.mPrimingFrames - pti.mRemainderFrames;
            
                error = AudioFileSetProperty(destinationFileID, kAudioFilePropertyPacketTableInfo, sizeof(pti), &pti);
                if (noErr == error) {
                    printf("Writing packet table information to destination file: %ld\n", sizeof(pti));
                    printf("     Total valid frames: %lld\n", pti.mNumberValidFrames);
                    printf("         Priming frames: %ld\n", pti.mPrimingFrames);
                    printf("       Remainder frames: %ld\n", pti.mRemainderFrames);
                } else {
                    printf("Some audio files can't contain packet table information and that's OK\n");
                }
            } else {
                 printf("Getting kAudioFilePropertyPacketTableInfo error: %ld\n", error);
            }
        } else {
            printf("No kAudioConverterPrimeInfo available and that's OK\n");
        }
    } else {
        printf("GetPropertyInfo for kAudioFilePropertyPacketTableInfo error: %ld, isWritable: %ld\n", error, isWritable);
    }
}
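For reference, the struct being read and rewritten above is declared in AudioFile.h as:

struct AudioFilePacketTableInfo
{
	SInt64  mNumberValidFrames;
	SInt32  mPrimingFrames;
	SInt32  mRemainderFrames;
};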
Example #8
		static OSStatus AttachNewCookie(AudioQueueRef inQueue, BackgroundTrackMgr::BG_FileInfo *inFileInfo)
		{
			OSStatus result = noErr;
			UInt32 size = sizeof(UInt32);
			result = AudioFileGetPropertyInfo (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, NULL);
			if (!result && size) 
			{
				char* cookie = new char [size];		
				result = AudioFileGetProperty (inFileInfo->mAFID, kAudioFilePropertyMagicCookieData, &size, cookie);
					AssertNoError("Error getting cookie data", end);
				result = AudioQueueSetProperty(inQueue, kAudioQueueProperty_MagicCookie, cookie, size);
				delete [] cookie;
					AssertNoError("Error setting cookie data for queue", end);
			}
			return noErr;
		
		end:
			return noErr;
		}
// Some audio formats have a magic cookie associated with them which is required to decompress audio data
// When converting audio data you must check to see if the format of the data has a magic cookie
// If the audio data format has a magic cookie associated with it, you must add this information to anAudio Converter
// using AudioConverterSetProperty and kAudioConverterDecompressionMagicCookie to appropriately decompress the data
// http://developer.apple.com/mac/library/qa/qa2001/qa1318.html
static void ReadCookie(AudioFileID sourceFileID, AudioConverterRef converter)
{
    // grab the cookie from the source file and set it on the converter
	UInt32 cookieSize = 0;
	OSStatus error = AudioFileGetPropertyInfo(sourceFileID, kAudioFilePropertyMagicCookieData, &cookieSize, NULL);
    
    // if there is an error here, then the format doesn't have a cookie - this is perfectly fine as some formats do not
	if (noErr == error && 0 != cookieSize) {
		char* cookie = new char [cookieSize];
		
		error = AudioFileGetProperty(sourceFileID, kAudioFilePropertyMagicCookieData, &cookieSize, cookie);
        if (noErr == error) {
            error = AudioConverterSetProperty(converter, kAudioConverterDecompressionMagicCookie, cookieSize, cookie);
            if (error) printf("Could not Set kAudioConverterDecompressionMagicCookie on the Audio Converter!\n");
        } else {
            printf("Could not Get kAudioFilePropertyMagicCookieData from source file!\n");
        }
		
		delete [] cookie;
	}
}
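A companion sketch (not part of the original excerpt): when producing a compressed destination file, the same dance runs in reverse, pulling the cookie out of the converter and storing it in the file:

static void WriteCookie(AudioConverterRef converter, AudioFileID destinationFileID)
{
	// grab the cookie from the converter and write it to the destination file
	UInt32 cookieSize = 0;
	OSStatus error = AudioConverterGetPropertyInfo(converter, kAudioConverterCompressionMagicCookie, &cookieSize, NULL);
	if (noErr == error && 0 != cookieSize) {
		char *cookie = new char[cookieSize];
		error = AudioConverterGetProperty(converter, kAudioConverterCompressionMagicCookie, &cookieSize, cookie);
		if (noErr == error)
			AudioFileSetProperty(destinationFileID, kAudioFilePropertyMagicCookieData, cookieSize, cookie);
		delete [] cookie;
	}
}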
Example #10
const OSStatus AudioFile::setClientFormat(const AudioStreamBasicDescription clientASBD)
{
  checkError(AudioConverterNew(&mInputFormat, &clientASBD, &mAudioConverterRef), "AudioConverterNew");
  mClientFormat = clientASBD;
  
  UInt32 size = sizeof(UInt32);
  UInt32 maxPacketSize;
  checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "AudioFileGetProperty");
  
  if (mIsVBR) {
    mPacketDescs = (AudioStreamPacketDescription*)calloc(mNumPacketsToRead, sizeof(AudioStreamPacketDescription));
  }
  
  // set magic cookie to the AudioConverter
  {
    UInt32 cookieSize;
    OSStatus err = AudioFileGetPropertyInfo(mAudioFileID, 
                                            kAudioFilePropertyMagicCookieData, 
                                            &cookieSize, 
                                            NULL);
    
    if (err == noErr && cookieSize > 0){
      char *magicCookie = (char*)malloc(sizeof(UInt8) * cookieSize);
      UInt32	magicCookieSize = cookieSize;
      AudioFileGetProperty(mAudioFileID,
                           kAudioFilePropertyMagicCookieData,
                           &magicCookieSize,
                           magicCookie);
      
      AudioConverterSetProperty(mAudioConverterRef,
                                kAudioConverterDecompressionMagicCookie,
                                magicCookieSize,
                                magicCookie);
      free(magicCookie);
    }
  }
  
  return noErr;
}
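A hypothetical call site, asking the converter for interleaved native-endian float output (the 44.1 kHz stereo values are placeholders; myAudioFile is assumed to be an already-opened AudioFile instance):

	AudioStreamBasicDescription client = {0};
	client.mSampleRate       = 44100.0;
	client.mFormatID         = kAudioFormatLinearPCM;
	client.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
	client.mChannelsPerFrame = 2;
	client.mBitsPerChannel   = 32;
	client.mFramesPerPacket  = 1;
	client.mBytesPerFrame    = client.mChannelsPerFrame * (client.mBitsPerChannel / 8);
	client.mBytesPerPacket   = client.mBytesPerFrame * client.mFramesPerPacket;
	myAudioFile.setClientFormat(client);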
// Write output channel layout to destination file
static void WriteDestinationChannelLayout(AudioConverterRef converter, AudioFileID sourceFileID, AudioFileID destinationFileID)
{
    UInt32 layoutSize = 0;
    bool layoutFromConverter = true;
    
    OSStatus error = AudioConverterGetPropertyInfo(converter, kAudioConverterOutputChannelLayout, &layoutSize, NULL);
        
    // if the Audio Converter doesn't have a layout see if the input file does
    if (error || 0 == layoutSize) {
        error = AudioFileGetPropertyInfo(sourceFileID, kAudioFilePropertyChannelLayout, &layoutSize, NULL);
        layoutFromConverter = false;
    }
    
    if (noErr == error && 0 != layoutSize) {
        char* layout = new char[layoutSize];
        
        if (layoutFromConverter) {
            error = AudioConverterGetProperty(converter, kAudioConverterOutputChannelLayout, &layoutSize, layout);
            if (error) printf("Could not Get kAudioConverterOutputChannelLayout from Audio Converter!\n");
        } else {
            error = AudioFileGetProperty(sourceFileID, kAudioFilePropertyChannelLayout, &layoutSize, layout);
            if (error) printf("Could not Get kAudioFilePropertyChannelLayout from source file!\n");
        }
        
        if (noErr == error) {
            error = AudioFileSetProperty(destinationFileID, kAudioFilePropertyChannelLayout, layoutSize, layout);
            if (noErr == error) {
                printf("Writing channel layout to destination file: %ld\n", layoutSize);
            } else {
                printf("Even though some formats have layouts, some files don't take them and that's OK\n");
            }
        }
        
        delete [] layout;
    }
}
Example #12
//-----------------------------------------------------------------------------
bool AudioFile::Load()
{
#if MAC
	AudioFileID mAudioFileID;
    AudioStreamBasicDescription fileDescription, outputFormat;
    SInt64 dataSize64;
    UInt32 dataSize;
	
	OSStatus err;
	UInt32 size;
	
    // Open the file
	FSRef	ref;
	Boolean	isDirectory=false;
	FSPathMakeRef((const UInt8*)GetFilePath(), &ref, &isDirectory);
	
	err = AudioFileOpen(&ref, fsRdPerm, 0, &mAudioFileID);
    if (err) {
        //NSLog(@"AudioFileOpen failed");
        return false;
    }
	
    // Read the opened file's basic format info into fileDescription
    size = sizeof(AudioStreamBasicDescription);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, 
							   &size, &fileDescription);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
    // Read the byte count of the file's data section into dataSize
    size = sizeof(SInt64);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, 
							   &size, &dataSize64);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	dataSize = static_cast<UInt32>(dataSize64);
	
	AudioFileTypeID	fileTypeID;
	size = sizeof( AudioFileTypeID );
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFileFormat, &size, &fileTypeID);
	if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
	// Initialize the instrument data
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;
	
	// Get the loop points
	Float64		st_point=0.0,end_point=0.0;
	if ( fileTypeID == kAudioFileAIFFType || fileTypeID == kAudioFileAIFCType ) {
		// Get the INST chunk
		AudioFileGetUserDataSize(mAudioFileID, 'INST', 0, &size);
		if ( size > 4 ) {
			UInt8	*instChunk = new UInt8[size];
			AudioFileGetUserData(mAudioFileID, 'INST', 0, &size, instChunk);
			
			// Get the MIDI note info
			mInstData.basekey = instChunk[0];
			mInstData.lowkey = instChunk[2];
			mInstData.highkey = instChunk[3];
			
			if ( instChunk[9] > 0 ) {	// check the loop flag
				// Get the markers
				UInt32	writable;
				err = AudioFileGetPropertyInfo(mAudioFileID, kAudioFilePropertyMarkerList,
											   &size, &writable);
				if (err) {
					//NSLog(@"AudioFileGetPropertyInfo failed");
					AudioFileClose(mAudioFileID);
					return false;	// was NULL; the function returns bool
				}
				UInt8	*markersBuffer = new UInt8[size];
				AudioFileMarkerList	*markers = reinterpret_cast<AudioFileMarkerList*>(markersBuffer);
				
				err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyMarkerList, 
										   &size, markers);
				if (err) {
					//NSLog(@"AudioFileGetProperty failed");
					AudioFileClose(mAudioFileID);
					return false;	// was NULL; the function returns bool
				}
				
				// Set the loop points
				for (unsigned int i=0; i<markers->mNumberMarkers; i++) {
					if (markers->mMarkers[i].mMarkerID == instChunk[11] ) {
						st_point = markers->mMarkers[i].mFramePosition;
					}
					else if (markers->mMarkers[i].mMarkerID == instChunk[13] ) {
						end_point = markers->mMarkers[i].mFramePosition;
					}
					if (markers->mMarkers[i].mName)
						CFRelease(markers->mMarkers[i].mName);	// guard: CFRelease(NULL) crashes
				}
				if ( st_point < end_point ) {
					mInstData.loop = 1;
				}
				delete [] markersBuffer;
			}
			delete [] instChunk;
		}
		
	}
	else if ( fileTypeID == kAudioFileWAVEType ) {
		// Get the smpl chunk
		AudioFileGetUserDataSize( mAudioFileID, 'smpl', 0, &size );
		if ( size >= sizeof(WAV_smpl) ) {
			UInt8	*smplChunk = new UInt8[size];
			AudioFileGetUserData( mAudioFileID, 'smpl', 0, &size, smplChunk );
			WAV_smpl	*smpl = (WAV_smpl *)smplChunk;
			
			smpl->loops = EndianU32_LtoN( smpl->loops );
			
			if ( smpl->loops > 0 ) {
				mInstData.loop = true;
				mInstData.basekey = EndianU32_LtoN( smpl->note );
				st_point = EndianU32_LtoN( smpl->start );
				end_point = EndianU32_LtoN( smpl->end ) + 1;	// SoundForge and friends treat the end point as inclusive
				//end_point = EndianU32_LtoN( smpl->end );	// Peak, for some reason, reads this the same way as AIFF
			}
			else {
				mInstData.basekey = EndianU32_LtoN( smpl->note );
			}
			delete [] smplChunk;
		}
	}
	
	// Cap the amount of data
	SInt64	dataSamples = dataSize / fileDescription.mBytesPerFrame;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * fileDescription.mBytesPerFrame;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
    // Allocate temporary memory for the waveform read
    char *fileBuffer;
	unsigned int	fileBufferSize;
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*fileDescription.mBytesPerFrame;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	err = AudioFileReadBytes(mAudioFileID, false, 0, &dataSize, fileBuffer);
    if (err) {
        //NSLog(@"AudioFileReadBytes failed");
        AudioFileClose(mAudioFileID);
        delete [] fileBuffer;
        return false;
    }
    AudioFileClose(mAudioFileID);
	
    // Unroll the loop
    Float64	adjustment = 1.0;
    outputFormat=fileDescription;
	if (mInstData.loop) {
		UInt32	plusalpha=0, framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = 
			(end_point-st_point)>(EXPAND_BUFFER-plusalpha)?(EXPAND_BUFFER-plusalpha):end_point-st_point;
			memcpy(fileBuffer+((int)end_point+plusalpha)*fileDescription.mBytesPerFrame,
				   fileBuffer+(int)st_point*fileDescription.mBytesPerFrame,
				   framestocopy*fileDescription.mBytesPerFrame);
			plusalpha += framestocopy;
		}
		dataSize += plusalpha*fileDescription.mBytesPerFrame;
		
		// Snap to a 16-sample boundary
		adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		st_point *= adjustment;
		end_point *= adjustment;
	}
	outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
	outputFormat.mChannelsPerFrame = 1;
	outputFormat.mBytesPerFrame = sizeof(float);
	outputFormat.mBitsPerChannel = 32;
	outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame;
	
    // Set up a converter for the byte-order conversion
    AudioConverterRef converter;
	err = AudioConverterNew(&fileDescription, &outputFormat, &converter);
    if (err) {
        //NSLog(@"AudioConverterNew failed");
        delete [] fileBuffer;
        return false;
    }
	
	// Set the sample-rate conversion quality to maximum
//	if (fileDescription.mSampleRate != outputFormat.mSampleRate) {
//		size = sizeof(UInt32);
//		UInt32	setProp = kAudioConverterQuality_Max;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterQuality,
//								  size, &setProp);
//        
//        size = sizeof(UInt32);
//		setProp = kAudioConverterSampleRateConverterComplexity_Mastering;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterComplexity,
//								  size, &setProp);
//        
//	}
	
    // Get a buffer size sufficient for the output
	UInt32	outputSize = dataSize;
	size = sizeof(UInt32);
	err = AudioConverterGetProperty(converter, kAudioConverterPropertyCalculateOutputBufferSize, 
									&size, &outputSize);
	if (err) {
		//NSLog(@"AudioConverterGetProperty failed");
		delete [] fileBuffer;
		AudioConverterDispose(converter);
        return false;
	}
    UInt32 monoSamples = outputSize/sizeof(float);
    
    // Convert the byte order
    float *monoData = new float[monoSamples];
	AudioConverterConvertBuffer(converter, dataSize, fileBuffer,
								&outputSize, monoData);
    if(outputSize == 0) {
        //NSLog(@"AudioConverterConvertBuffer failed");
        delete [] fileBuffer;
        AudioConverterDispose(converter);
        return false;
    }
    
    // If the loop length is not a multiple of 16, convert the sample rate
    Float64 inputSampleRate = fileDescription.mSampleRate;
    Float64 outputSampleRate = fileDescription.mSampleRate * adjustment;
    int	outSamples = monoSamples;
    if ( outputSampleRate == inputSampleRate ) {
        m_pAudioData = new short[monoSamples];
        for (int i=0; i<monoSamples; i++) {
            m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
        }
    }
    else {
        outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
        m_pAudioData = new short[outSamples];
        resampling(monoData, monoSamples, inputSampleRate,
                   m_pAudioData, &outSamples, outputSampleRate);
    }
    
    // Clean up
    delete [] monoData;
    delete [] fileBuffer;
    AudioConverterDispose(converter);
	
	// Set the instrument data
	if ( st_point > MAXIMUM_SAMPLES ) {
		mInstData.lp = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp			= st_point;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		mInstData.lp_end = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp_end		= end_point;
	}
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;
	
	mIsLoaded = true;
	
	return true;
#else
	// Windows audio-file loading path

	// Open the file
	HMMIO	hmio = NULL;
	MMRESULT	err;
	DWORD		size;

	hmio = mmioOpen( mPath, NULL, MMIO_READ );
	if ( !hmio ) {
		return false;
	}
	
	// Find the RIFF chunk
	MMCKINFO	riffChunkInfo;
	riffChunkInfo.fccType = mmioFOURCC('W', 'A', 'V', 'E');
	err = mmioDescend( hmio, &riffChunkInfo, NULL, MMIO_FINDRIFF );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( (riffChunkInfo.ckid != FOURCC_RIFF) || (riffChunkInfo.fccType != mmioFOURCC('W', 'A', 'V', 'E') ) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Find the format chunk
	MMCKINFO	formatChunkInfo;
	formatChunkInfo.ckid = mmioFOURCC('f', 'm', 't', ' ');
	err = mmioDescend( hmio, &formatChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( formatChunkInfo.cksize < sizeof(PCMWAVEFORMAT) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Get the format info
	WAVEFORMATEX	pcmWaveFormat;
	DWORD			fmsize = (formatChunkInfo.cksize > sizeof(WAVEFORMATEX)) ? sizeof(WAVEFORMATEX):formatChunkInfo.cksize;
	size = mmioRead( hmio, (HPSTR)&pcmWaveFormat, fmsize );
	if ( size != fmsize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( pcmWaveFormat.wFormatTag != WAVE_FORMAT_PCM ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Initialize the instrument data
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;

	// Find the smpl chunk
	MMCKINFO	smplChunkInfo;
	smplChunkInfo.ckid = mmioFOURCC('s', 'm', 'p', 'l');
	err = mmioDescend( hmio, &smplChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		smplChunkInfo.cksize = 0;
	}
	double	st_point=0.0;
	double	end_point=0.0;
	if ( smplChunkInfo.cksize >= sizeof(WAV_smpl) ) {
		// Get the loop points
		unsigned char	*smplChunk = new unsigned char[smplChunkInfo.cksize];
		size = mmioRead(hmio,(HPSTR)smplChunk, smplChunkInfo.cksize);
		WAV_smpl	*smpl = (WAV_smpl *)smplChunk;

		if ( smpl->loops > 0 ) {
			mInstData.loop = 1;
			mInstData.basekey = smpl->note;
			st_point = smpl->start;
			end_point = smpl->end + 1;	// SoundForge and friends treat the end point as inclusive
		}
		else {
			mInstData.basekey = smpl->note;
		}
		delete [] smplChunk;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Find the data chunk
	MMCKINFO dataChunkInfo;
	dataChunkInfo.ckid = mmioFOURCC('d', 'a', 't', 'a');
	err = mmioDescend( hmio, &dataChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Allocate temporary memory for the waveform read
	unsigned int	dataSize = dataChunkInfo.cksize;
	int				bytesPerSample = pcmWaveFormat.nBlockAlign;
	char			*fileBuffer;
	unsigned int	fileBufferSize;

	// Cap the amount of data
	int	dataSamples = dataSize / pcmWaveFormat.nBlockAlign;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * pcmWaveFormat.nBlockAlign;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
	
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*bytesPerSample;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	size = mmioRead(hmio, (HPSTR)fileBuffer, dataSize);
	if ( size != dataSize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioClose(hmio,0);

	// Unroll the loop
	double	inputSampleRate = pcmWaveFormat.nSamplesPerSec;
	double	outputSampleRate = inputSampleRate;
	if (mInstData.loop) {
		unsigned int	plusalpha=0;
		double			framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = 
			(end_point-st_point)>(EXPAND_BUFFER-plusalpha)?(EXPAND_BUFFER-plusalpha):end_point-st_point;
			memcpy(fileBuffer+((int)end_point+plusalpha)*bytesPerSample,
				   fileBuffer+(int)st_point*bytesPerSample,
				   static_cast<size_t>(framestocopy*bytesPerSample));
			plusalpha += static_cast<unsigned int>(framestocopy);
		}
		dataSize += plusalpha*bytesPerSample;
		
		// Snap to a 16-sample boundary
		double	adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		outputSampleRate *= adjustment;
		st_point *= adjustment;
		end_point *= adjustment;
	}

	// Convert to float mono data first
	int	bytesPerChannel = bytesPerSample / pcmWaveFormat.nChannels;
	unsigned int	inputPtr = 0;
	unsigned int	outputPtr = 0;
	int				monoSamples = dataSize / bytesPerSample;
	float	range = static_cast<float>((1<<(bytesPerChannel*8-1)) * pcmWaveFormat.nChannels);
	float	*monoData = new float[monoSamples];
	while (inputPtr < dataSize) {
		int	frameSum = 0;
		for (int ch=0; ch<pcmWaveFormat.nChannels; ch++) {
			for (int i=0; i<bytesPerChannel; i++) {
				if (i<bytesPerChannel-1) {
					frameSum += (unsigned char)fileBuffer[inputPtr] << (8*i);
				}
				else {
					frameSum += fileBuffer[inputPtr] << (8*i);
				}
				inputPtr++;
			}
		}
		monoData[outputPtr] = frameSum / range;
		outputPtr++;
	}

	// If the loop length is not a multiple of 16, convert the sample rate
	int	outSamples = monoSamples;
	if ( outputSampleRate == inputSampleRate ) {
		m_pAudioData = new short[monoSamples];
		for (int i=0; i<monoSamples; i++) {
			m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
		}
	}
	else {
		outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
		m_pAudioData = new short[outSamples];
		resampling(monoData, monoSamples, inputSampleRate,
				   m_pAudioData, &outSamples, outputSampleRate);
	}

	// Clean up
	delete [] fileBuffer;
	delete [] monoData;

	// Set the instrument data
	mInstData.lp			= static_cast<int>(st_point);
	mInstData.lp_end		= static_cast<int>(end_point);
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;

	mIsLoaded = true;

	return true;
#endif
}
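WAV_smpl is not shown in this example. A layout consistent with the snippet's field accesses and with the RIFF 'smpl' chunk (header plus the first loop) would be:

// Assumed struct: standard 'smpl' chunk header followed by the first loop.
struct WAV_smpl {
	UInt32 manufacturer;
	UInt32 product;
	UInt32 samplePeriod;
	UInt32 note;          // MIDI unity note
	UInt32 pitchFraction;
	UInt32 smpteFormat;
	UInt32 smpteOffset;
	UInt32 loops;         // number of sample loops
	UInt32 samplerData;
	UInt32 cuePointID;    // fields of the first loop start here
	UInt32 type;
	UInt32 start;
	UInt32 end;
	UInt32 fraction;
	UInt32 playCount;
};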
Example #13
int main (int argc, const char * argv[]) 
{
#if TARGET_OS_WIN32
	InitializeQTML(0L);
#endif
	const char *fpath = NULL;
	Float32 volume = 1;
	Float32 duration = -1;
	Float32 currentTime = 0.0;
	Float32 rate = 0;
	int rQuality = 0;
	
	bool doPrint = false;
	for (int i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		if (arg[0] != '-') {
			if (fpath != NULL) {
				fprintf(stderr, "may only specify one file to play\n");
				usage();
			}
			fpath = arg;
		} else {
			arg += 1;
			if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];
				sscanf(arg, "%f", &volume);
			} else if (arg[0] == 't' || !strcmp(arg, "-time")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &duration);
			} else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &rate);
			} else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%d", &rQuality);
			} else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
				usage();
			} else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
				doPrint = true;
			} else {
				fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
				usage();
			}
		}
	}

	if (fpath == NULL)
		usage();
	
	if (doPrint)
		printf ("Playing file: %s\n", fpath);
	
	try {
		AQTestInfo myInfo;
		
		CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
		XThrowIfError (!sndFile, "can't parse file path");
			
		OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
		CFRelease (sndFile);
						
		XThrowIfError(result, "AudioFileOpen failed");
		
		UInt32 size;
		XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
		UInt32 numFormats = size / sizeof(AudioFormatListItem);
		AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
		numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
		if (numFormats == 1) {
				// this is the common case
			myInfo.mDataFormat = formatList[0].mASBD;
			
				// see if there is a channel layout (multichannel file)
			result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
			if (result == noErr && myInfo.mChannelLayoutSize > 0) {
				myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
				XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
			}
		} else {
			if (doPrint) {
				printf ("File has a %d layered data format:\n", (int)numFormats);
				for (unsigned int i = 0; i < numFormats; ++i)
					CAStreamBasicDescription(formatList[i].mASBD).Print();
			}
			// now we should look to see which decoders we have on the system
			XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
			UInt32 numDecoders = size / sizeof(OSType);
			OSType *decoderIDs = new OSType [ numDecoders ];
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");			
			unsigned int i = 0;
			for (; i < numFormats; ++i) {
				OSType decoderID = formatList[i].mASBD.mFormatID;
				bool found = false;
				for (unsigned int j = 0; j < numDecoders; ++j) {
					if (decoderID == decoderIDs[j]) {
						found = true;
						break;
					}
				}
				if (found) break;
			}
			delete [] decoderIDs;
			
			if (i >= numFormats) {
				fprintf (stderr, "Cannot play any of the formats in this file\n");
				throw kAudioFileUnsupportedDataFormatError;
			}
			myInfo.mDataFormat = formatList[i].mASBD;
			myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
			myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
			myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
			myInfo.mChannelLayout->mChannelBitmap = 0;
			myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
		}
		delete [] formatList;
		
		if (doPrint) {
			printf ("Playing format: "); 
			myInfo.mDataFormat.Print();
		}
		
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, 
									CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed");

		UInt32 bufferByteSize;		
		// we need to calculate how many packets we read at a time, and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a half second of audio based on this format
			CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR)
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			else
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
				
			if (doPrint)
				printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// (2) If the file has a cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// set ACL if there is one
		if (myInfo.mChannelLayout)
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

		// prime the queue with some data before starting
		myInfo.mDone = false;
		myInfo.mCurrentPacket = 0;
		for (int i = 0; i < kNumberBuffers; ++i) {
			XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

			AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);
			
			if (myInfo.mDone) break;
		}	
			// set the volume of the queue
		XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");
		
		XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");
		
#if !TARGET_OS_IPHONE
		if (rate > 0) {
			UInt32 propValue = 1;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");
			
			propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");
			
			propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
			if (rate != 1) {
				XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
			}
			
			if (doPrint) {
				printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
			}
		}
#endif
			// lets start playing now - stop is called in the AQTestBufferCallback when there's
			// no more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		do {
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
			currentTime += .25;
			if (duration > 0 && currentTime >= duration)
				break;
			
		} while (gIsRunning);
			
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioFileClose failed");
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
	catch (...) {
		fprintf(stderr, "Unspecified exception\n");
	}
	
    return 0;
}
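MyAudioQueuePropertyListenerProc is not shown above. A plausible implementation that keeps the gIsRunning global in sync with the queue (gIsRunning itself is assumed to be declared elsewhere):

static void MyAudioQueuePropertyListenerProc(void *inUserData, AudioQueueRef inAQ, AudioQueuePropertyID inID)
{
	if (inID == kAudioQueueProperty_IsRunning) {
		UInt32 isRunning = 0;
		UInt32 size = sizeof(isRunning);
		if (AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &isRunning, &size) == noErr)
			gIsRunning = isRunning;
	}
}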
Example #14
void playFile(const char* filePath) {

  CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL,
                                                                  (UInt8*) filePath,
                                                                  strlen (filePath),
                                                                  false);


  OSStatus result = AudioFileOpenURL(audioFileURL,
                                     fsRdPerm,
                                     0,
                                     &aqData.mAudioFile);

  CFRelease (audioFileURL);

  UInt32 dataFormatSize = sizeof (aqData.mDataFormat);

  AudioFileGetProperty(aqData.mAudioFile,
                       kAudioFilePropertyDataFormat,
                       &dataFormatSize,
                       &aqData.mDataFormat);

  AudioQueueNewOutput(&aqData.mDataFormat,
                      HandleOutputBuffer,
                      &aqData,
                      CFRunLoopGetCurrent(),
                      kCFRunLoopCommonModes,
                      0,
                      &aqData.mQueue);

  UInt32 maxPacketSize;
  UInt32 propertySize = sizeof (maxPacketSize);
  AudioFileGetProperty(aqData.mAudioFile,
                       kAudioFilePropertyPacketSizeUpperBound,
                       &propertySize,
                       &maxPacketSize);


  DeriveBufferSize(&aqData.mDataFormat,
                   maxPacketSize,
                   0.5,
                   &aqData.bufferByteSize,
                   &aqData.mNumPacketsToRead);

  bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 ||
                      aqData.mDataFormat.mFramesPerPacket == 0);

  if (isFormatVBR) {
    // LOG("%s\n","VBR");
    aqData.mPacketDescs =
      (AudioStreamPacketDescription*)
      malloc (aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription));
  } else {
    aqData.mPacketDescs = NULL;
  }

  UInt32 cookieSize = sizeof (UInt32);
  bool couldNotGetProperty =
    AudioFileGetPropertyInfo (aqData.mAudioFile,
                              kAudioFilePropertyMagicCookieData,
                              &cookieSize,
                              NULL);

  if (!couldNotGetProperty && cookieSize) {
    char* magicCookie = (char *) malloc (cookieSize);

    AudioFileGetProperty (aqData.mAudioFile,
                          kAudioFilePropertyMagicCookieData,
                          &cookieSize,
                          magicCookie);

    AudioQueueSetProperty (aqData.mQueue,
                           kAudioQueueProperty_MagicCookie,
                           magicCookie,
                           cookieSize);

    free (magicCookie);
  }

  aqData.mCurrentPacket = 0;
  aqData.mIsRunning = true;

  //LOG("%d\n", aqData.mNumPacketsToRead);
  for (int i = 0; i < kNumberBuffers; ++i) {
    AudioQueueAllocateBuffer (aqData.mQueue,
                              aqData.bufferByteSize,
                              &aqData.mBuffers[i]);

    HandleOutputBuffer (&aqData,
                        aqData.mQueue,
                        aqData.mBuffers[i]);
  }

  Float32 gain = 1.0;
  // Optionally, allow user to override gain setting here
  AudioQueueSetParameter (aqData.mQueue,
                          kAudioQueueParam_Volume,
                          gain);


  //LOG("%s\n","Starting play");


  // IMPORTANT NOTE: this value must be set before the call to HandleOutputBuffer
  // aqData.mIsRunning = true;

  AudioQueueStart (aqData.mQueue,
                   NULL);

}
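DeriveBufferSize is not shown above. A sketch matching the call site, modeled on the version in Apple's Audio Queue Services Programming Guide: size the buffer for roughly `seconds` of audio at the format's packet rate, clamped to sane bounds.

static void DeriveBufferSize(AudioStreamBasicDescription *ASBDesc,
                             UInt32 maxPacketSize,
                             Float64 seconds,
                             UInt32 *outBufferSize,
                             UInt32 *outNumPacketsToRead)
{
  static const UInt32 maxBufferSize = 0x50000;   // upper bound: 320 KB
  static const UInt32 minBufferSize = 0x4000;    // lower bound: 16 KB

  if (ASBDesc->mFramesPerPacket != 0) {
    // fixed frames per packet: size the buffer for `seconds` worth of packets
    Float64 numPacketsForTime = ASBDesc->mSampleRate / ASBDesc->mFramesPerPacket * seconds;
    *outBufferSize = (UInt32)(numPacketsForTime * maxPacketSize);
  } else {
    // no fixed packet duration: fall back to the larger of the bounds
    *outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
  }

  if (*outBufferSize > maxBufferSize && *outBufferSize > maxPacketSize)
    *outBufferSize = maxBufferSize;
  else if (*outBufferSize < minBufferSize)
    *outBufferSize = minBufferSize;

  *outNumPacketsToRead = *outBufferSize / maxPacketSize;
}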
Example #15
File: main.cpp Project: ebakan/SMS
void MakeSimpleGraph (AUGraph &theGraph, CAAudioUnit &fileAU, CAStreamBasicDescription &fileFormat, AudioFileID audioFile)
{
	XThrowIfError (NewAUGraph (&theGraph), "NewAUGraph");
	
	CAComponentDescription cd;

	// output node
	cd.componentType = kAudioUnitType_Output;
	cd.componentSubType = kAudioUnitSubType_DefaultOutput;
	cd.componentManufacturer = kAudioUnitManufacturer_Apple;

	AUNode outputNode;
	XThrowIfError (AUGraphAddNode (theGraph, &cd, &outputNode), "AUGraphAddNode");
	
	// file AU node
	AUNode fileNode;
	cd.componentType = kAudioUnitType_Generator;
	cd.componentSubType = kAudioUnitSubType_AudioFilePlayer;
	
	XThrowIfError (AUGraphAddNode (theGraph, &cd, &fileNode), "AUGraphAddNode");
	
	// connect & setup
	XThrowIfError (AUGraphOpen (theGraph), "AUGraphOpen");
	
	// get the AudioUnit for the file player node
	AudioUnit anAU;
	XThrowIfError (AUGraphNodeInfo(theGraph, fileNode, NULL, &anAU), "AUGraphNodeInfo");
	
	fileAU = CAAudioUnit (fileNode, anAU);

// prepare the file AU for playback
// set its output channels
	XThrowIfError (fileAU.SetNumberChannels (kAudioUnitScope_Output, 0, fileFormat.NumberChannels()), "SetNumberChannels");

// set the output sample rate of the file AU to be the same as the file:
	XThrowIfError (fileAU.SetSampleRate (kAudioUnitScope_Output, 0, fileFormat.mSampleRate), "SetSampleRate");

// load in the file 
	XThrowIfError (fileAU.SetProperty(kAudioUnitProperty_ScheduledFileIDs, 
						kAudioUnitScope_Global, 0, &audioFile, sizeof(audioFile)), "SetScheduleFile");


	XThrowIfError (AUGraphConnectNodeInput (theGraph, fileNode, 0, outputNode, 0), "AUGraphConnectNodeInput");

// At this point we make sure the file player AU is initialized;
// this also propagates the output format of the AU to the output unit
	XThrowIfError (AUGraphInitialize (theGraph), "AUGraphInitialize");
	
	// workaround a race condition in the file player AU
	usleep (10 * 1000);

// if we have a surround file, then we should try to tell the output AU what the order of the channels will be
	if (fileFormat.NumberChannels() > 2) {
		UInt32 layoutSize = 0;
		OSStatus err;
		XThrowIfError (err = AudioFileGetPropertyInfo (audioFile, kAudioFilePropertyChannelLayout, &layoutSize, NULL),
								"kAudioFilePropertyChannelLayout");
		
		if (!err && layoutSize) {
			char* layout = new char[layoutSize];
			
			err = AudioFileGetProperty(audioFile, kAudioFilePropertyChannelLayout, &layoutSize, layout);
			XThrowIfError (err, "Get Layout From AudioFile");
			
			// ok, now get the output AU and set its layout
			XThrowIfError (AUGraphNodeInfo(theGraph, outputNode, NULL, &anAU), "AUGraphNodeInfo");
			
			err = AudioUnitSetProperty (anAU, kAudioUnitProperty_AudioChannelLayout, 
							kAudioUnitScope_Input, 0, layout, layoutSize);
			XThrowIfError (err, "kAudioUnitProperty_AudioChannelLayout");
			
			delete [] layout;
		}
	}
}
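The excerpt ends before anything is scheduled on the file player AU. As a sketch of the next step (modeled on Apple's PlayFile sample; error handling via the same XThrowIfError convention), the caller still has to schedule a file region and a start time before AUGraphStart:

	// how many packets are in the file?
	UInt64 nPackets;
	UInt32 propsize = sizeof(nPackets);
	XThrowIfError(AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataPacketCount, &propsize, &nPackets),
	              "kAudioFilePropertyAudioDataPacketCount");

	// schedule the whole file as one region
	ScheduledAudioFileRegion rgn;
	memset(&rgn.mTimeStamp, 0, sizeof(rgn.mTimeStamp));
	rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
	rgn.mTimeStamp.mSampleTime = 0;
	rgn.mCompletionProc = NULL;
	rgn.mCompletionProcUserData = NULL;
	rgn.mAudioFile = audioFile;
	rgn.mLoopCount = 0;
	rgn.mStartFrame = 0;
	rgn.mFramesToPlay = (UInt32)(nPackets * fileFormat.mFramesPerPacket);
	XThrowIfError(fileAU.SetProperty(kAudioUnitProperty_ScheduledFileRegion,
	              kAudioUnitScope_Global, 0, &rgn, sizeof(rgn)), "SetScheduledFileRegion");

	// start playing at the next render cycle
	AudioTimeStamp startTime;
	memset(&startTime, 0, sizeof(startTime));
	startTime.mFlags = kAudioTimeStampSampleTimeValid;
	startTime.mSampleTime = -1;
	XThrowIfError(fileAU.SetProperty(kAudioUnitProperty_ScheduleStartTimeStamp,
	              kAudioUnitScope_Global, 0, &startTime, sizeof(startTime)), "SetScheduleStartTimeStamp");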
Example #16
bool SFB::Audio::CoreAudioDecoder::_Open(CFErrorRef *error)
{
	// Open the input file
	OSStatus result = AudioFileOpenWithCallbacks(this, myAudioFile_ReadProc, nullptr, myAudioFile_GetSizeProc, nullptr, 0, &mAudioFile);

	if(noErr != result) {
		LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileOpenWithCallbacks failed: " << result);
		
		if(error) {
			SFB::CFString description = CFCopyLocalizedString(CFSTR("The format of the file “%@” was not recognized."), "");
			SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("File Format Not Recognized"), "");
			SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), "");
			
			*error = CreateErrorForURL(Decoder::ErrorDomain, Decoder::InputOutputError, description, mInputSource->GetURL(), failureReason, recoverySuggestion);
		}
		
		return false;
	}
	
	result = ExtAudioFileWrapAudioFileID(mAudioFile, false, &mExtAudioFile);

	if(noErr != result) {
		LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileWrapAudioFileID failed: " << result);
		
		if(error) {
			SFB::CFString description = CFCopyLocalizedString(CFSTR("The format of the file “%@” was not recognized."), "");
			SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("File Format Not Recognized"), "");
			SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), "");
			
			*error = CreateErrorForURL(Decoder::ErrorDomain, Decoder::InputOutputError, description, mInputSource->GetURL(), failureReason, recoverySuggestion);
		}

		result = AudioFileClose(mAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
		
		mAudioFile = nullptr;
		
		return false;
	}
	
	// Query file format
	UInt32 dataSize = sizeof(mSourceFormat);
	result = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_FileDataFormat, &dataSize, &mSourceFormat);

	if(noErr != result) {
		LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetProperty (kExtAudioFileProperty_FileDataFormat) failed: " << result);
		
		result = ExtAudioFileDispose(mExtAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result);
		
		result = AudioFileClose(mAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
		
		mAudioFile = nullptr;		
		mExtAudioFile = nullptr;
		
		return false;
	}
	
	// Tell the ExtAudioFile the format in which we'd like our data
	
	// For Linear PCM formats, leave the data untouched
	if(kAudioFormatLinearPCM == mSourceFormat.mFormatID)
		mFormat = mSourceFormat;
	// For Apple Lossless, convert to high-aligned signed ints in 32 bits
	else if(kAudioFormatAppleLossless == mSourceFormat.mFormatID) {
		mFormat.mFormatID			= kAudioFormatLinearPCM;
		mFormat.mFormatFlags		= kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsAlignedHigh;
		
		mFormat.mSampleRate			= mSourceFormat.mSampleRate;
		mFormat.mChannelsPerFrame	= mSourceFormat.mChannelsPerFrame;
		
		if(kAppleLosslessFormatFlag_16BitSourceData == mSourceFormat.mFormatFlags)
			mFormat.mBitsPerChannel	= 16;
		else if(kAppleLosslessFormatFlag_20BitSourceData == mSourceFormat.mFormatFlags)
			mFormat.mBitsPerChannel	= 20;
		else if(kAppleLosslessFormatFlag_24BitSourceData == mSourceFormat.mFormatFlags)
			mFormat.mBitsPerChannel	= 24;
		else if(kAppleLosslessFormatFlag_32BitSourceData == mSourceFormat.mFormatFlags)
			mFormat.mBitsPerChannel	= 32;
		
		mFormat.mBytesPerPacket		= 4 * mFormat.mChannelsPerFrame;
		mFormat.mFramesPerPacket	= 1;
		mFormat.mBytesPerFrame		= mFormat.mBytesPerPacket * mFormat.mFramesPerPacket;
		
		mFormat.mReserved			= 0;
		
	}
	// For all other formats convert to the canonical Core Audio format
	else {
		mFormat.mFormatID			= kAudioFormatLinearPCM;
		mFormat.mFormatFlags		= kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
		
		mFormat.mSampleRate			= mSourceFormat.mSampleRate;
		mFormat.mChannelsPerFrame	= mSourceFormat.mChannelsPerFrame;
		mFormat.mBitsPerChannel		= 32;
		
		mFormat.mBytesPerPacket		= (mFormat.mBitsPerChannel / 8);
		mFormat.mFramesPerPacket	= 1;
		mFormat.mBytesPerFrame		= mFormat.mBytesPerPacket * mFormat.mFramesPerPacket;
		
		mFormat.mReserved			= 0;
	}
	
	result = ExtAudioFileSetProperty(mExtAudioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(mFormat), &mFormat);

	if(noErr != result) {
		LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileSetProperty (kExtAudioFileProperty_ClientDataFormat) failed: " << result);
		
		result = ExtAudioFileDispose(mExtAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result);
		
		result = AudioFileClose(mAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
		
		mAudioFile = nullptr;		
		mExtAudioFile = nullptr;
		
		return false;
	}
	
	// Setup the channel layout
	// There is a bug in EAF where if the underlying AF doesn't return a channel layout it returns an empty struct
//	result = ExtAudioFileGetPropertyInfo(mExtAudioFile, kExtAudioFileProperty_FileChannelLayout, &dataSize, nullptr);
	result = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, &dataSize, nullptr);
	if(noErr == result) {
		auto channelLayout = (AudioChannelLayout *)malloc(dataSize);
//		result = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_FileChannelLayout, &dataSize, mChannelLayout);
		result = AudioFileGetProperty(mAudioFile, kAudioFilePropertyChannelLayout, &dataSize, channelLayout);

		if(noErr != result) {
//			LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetProperty (kExtAudioFileProperty_FileChannelLayout) failed: " << result);
			LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetProperty (kAudioFilePropertyChannelLayout) failed: " << result);
			
            free(channelLayout);

			result = ExtAudioFileDispose(mExtAudioFile);
			if(noErr != result)
				LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result);
			
			result = AudioFileClose(mAudioFile);
			if(noErr != result)
				LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
			
			mAudioFile = nullptr;		
			mExtAudioFile = nullptr;
			
			return false;
		}

		mChannelLayout = channelLayout;
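		// (assumption: the assignment above takes its own copy of the layout,
		// which is what makes the free below safe)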

		free(channelLayout);
	}
	else
//		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetPropertyInfo (kExtAudioFileProperty_FileChannelLayout) failed: " << result);
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetPropertyInfo (kAudioFilePropertyChannelLayout) failed: " << result);

	// Work around bugs in ExtAudioFile: http://lists.apple.com/archives/coreaudio-api/2009/Nov/msg00119.html
	// Synopsis: ExtAudioFileTell() and ExtAudioFileSeek() are broken for m4a files
	AudioFileID audioFile;
	dataSize = sizeof(audioFile);
	result = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_AudioFile, &dataSize, &audioFile);
	
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetProperty (kExtAudioFileProperty_AudioFile) failed: " << result);
		
		result = ExtAudioFileDispose(mExtAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result);
		
		result = AudioFileClose(mAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
		
		mAudioFile = nullptr;		
		mExtAudioFile = nullptr;
		
		return false;
	}
	
	AudioFileTypeID fileFormat;
	dataSize = sizeof(fileFormat);
	result = AudioFileGetProperty(audioFile, kAudioFilePropertyFileFormat, &dataSize, &fileFormat);

	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetProperty (kAudioFilePropertyFileFormat) failed: " << result);
		
		result = ExtAudioFileDispose(mExtAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result);
		
		result = AudioFileClose(mAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
		
		mAudioFile = nullptr;		
		mExtAudioFile = nullptr;
		
		return false;
	}
	
	if(kAudioFileM4AType == fileFormat || kAudioFileMPEG4Type == fileFormat || kAudioFileAAC_ADTSType == fileFormat)
		mUseM4AWorkarounds = true;
	
#if 0
	// This was supposed to determine whether ExtAudioFile had been fixed, but even
	// though it passes on 10.6.2, things still do not behave properly
	SInt64 currentFrame = -1;	
	result = ExtAudioFileTell(mExtAudioFile, &currentFrame);

	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileTell failed: " << result);
		
		result = ExtAudioFileDispose(mExtAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result);
		
		result = AudioFileClose(mAudioFile);
		if(noErr != result)
			LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result);
		
		mAudioFile = nullptr;		
		mExtAudioFile = nullptr;
		
		return false;
	}
	
	if(0 > currentFrame)
		mUseM4AWorkarounds = true;
#endif

	return true;
}
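
The mUseM4AWorkarounds flag is set above but its consumer isn't part of this excerpt. A plausible sketch of the usual workaround, assuming a hypothetical mCurrentFrame member that the read path increments on every successful ExtAudioFileRead, is to bypass the broken ExtAudioFileTell() entirely for m4a files:

// Hedged sketch, not from this listing: mCurrentFrame is an assumed member
// tracking the decoder's own frame position
SInt64 GetCurrentFrame() const
{
	if(mUseM4AWorkarounds)
		return mCurrentFrame;

	SInt64 currentFrame = -1;
	OSStatus result = ExtAudioFileTell(mExtAudioFile, &currentFrame);
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileTell failed: " << result);
		return -1;
	}

	return currentFrame;
}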
Ejemplo n.º 17
0
void DoAQOfflineRender(CFURLRef sourceURL, CFURLRef destinationURL) 
{
    // main audio queue code
	try {
		AQTestInfo myInfo;
        
		myInfo.mDone = false;
		myInfo.mFlushed = false;
		myInfo.mCurrentPacket = 0;
		
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile), "AudioFileOpen failed");
			
		UInt32 size = sizeof(myInfo.mDataFormat);
		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat), "couldn't get file's data format");
		
		printf ("File format: "); myInfo.mDataFormat.Print();

        // create a new audio queue output
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat,      // The data format of the audio to play. For linear PCM, only interleaved formats are supported.
                                          AQTestBufferCallback,     // A callback function to use with the playback audio queue.
                                          &myInfo,                  // A custom data structure for use with the callback function.
                                          CFRunLoopGetCurrent(),    // The event loop on which the callback function pointed to by the inCallbackProc parameter is to be called.
                                                                    // If you specify NULL, the callback is invoked on one of the audio queue’s internal threads.
                                          kCFRunLoopCommonModes,    // The run loop mode in which to invoke the callback function specified in the inCallbackProc parameter. 
                                          0,                        // Reserved for future use. Must be 0.
                                          &myInfo.mQueue),          // On output, the newly created playback audio queue object.
                                          "AudioQueueNew failed");

		UInt32 bufferByteSize;
		
		// we need to calculate how many packets we read at a time and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a second of audio based on this format
			CalculateBytesForTime(myInfo.mDataFormat, maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR) {
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			} else {
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }
				
			printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// if the file has a magic cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		OSStatus result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// channel layout?
		OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, NULL);
		AudioChannelLayout *acl = NULL;
		if (err == noErr && size > 0) {
			acl = (AudioChannelLayout *)malloc(size);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
		}

		//allocate the input read buffer
		XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffer), "AudioQueueAllocateBuffer");

		// prepare a canonical interleaved capture format
		CAStreamBasicDescription captureFormat;
		captureFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
		captureFormat.SetAUCanonical(myInfo.mDataFormat.mChannelsPerFrame, true); // interleaved
		XThrowIfError(AudioQueueSetOfflineRenderFormat(myInfo.mQueue, &captureFormat, acl), "set offline render format");			
		
		ExtAudioFileRef captureFile;
        
		// prepare a 16-bit int file format, sample channel count and sample rate
		CAStreamBasicDescription dstFormat;
		dstFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
		dstFormat.mChannelsPerFrame = myInfo.mDataFormat.mChannelsPerFrame;
		dstFormat.mFormatID = kAudioFormatLinearPCM;
		dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
		dstFormat.mBitsPerChannel = 16;
		dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
		dstFormat.mFramesPerPacket = 1;
		
		// create the capture file
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");
		
        // set the capture file's client format to be the canonical format from the queue
		XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");
		
		// allocate the capture buffer, just keep it at half the size of the enqueue buffer
        // we don't ever want to pull any faster than we can push data in for render
        // this 2:1 ratio keeps the AQ Offline Render happy
		const UInt32 captureBufferByteSize = bufferByteSize / 2;
		
        AudioQueueBufferRef captureBuffer;
		AudioBufferList captureABL;
		
		XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, captureBufferByteSize, &captureBuffer), "AudioQueueAllocateBuffer");
		
        captureABL.mNumberBuffers = 1;
		captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
		captureABL.mBuffers[0].mNumberChannels = captureFormat.mChannelsPerFrame;

		// let's start playing now - stop is called in the AQTestBufferCallback when there's
		// no more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		AudioTimeStamp ts;
		ts.mFlags = kAudioTimeStampSampleTimeValid;
		ts.mSampleTime = 0;

		// we need to call this once asking for 0 frames
		XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, 0), "AudioQueueOfflineRender");

		// we need to enqueue a buffer after the queue has started
		AQTestBufferCallback(&myInfo, myInfo.mQueue, myInfo.mBuffer);

		while (true) {
			UInt32 reqFrames = captureBufferByteSize / captureFormat.mBytesPerFrame;
			
            XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, reqFrames), "AudioQueueOfflineRender");
			
            captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
			captureABL.mBuffers[0].mDataByteSize = captureBuffer->mAudioDataByteSize;
			UInt32 writeFrames = captureABL.mBuffers[0].mDataByteSize / captureFormat.mBytesPerFrame;
			
            printf("t = %.f: AudioQueueOfflineRender:  req %d fr/%d bytes, got %ld fr/%d bytes\n", ts.mSampleTime, (int)reqFrames, (int)captureBufferByteSize, writeFrames, (int)captureABL.mBuffers[0].mDataByteSize);
            
			XThrowIfError(ExtAudioFileWrite(captureFile, writeFrames, &captureABL), "ExtAudioFileWrite");
			
            if (myInfo.mFlushed) break;
			
			ts.mSampleTime += writeFrames;
		}

		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioFileClose failed");
		XThrowIfError(ExtAudioFileDispose(captureFile), "ExtAudioFileDispose failed");

		if (myInfo.mPacketDescs) delete [] myInfo.mPacketDescs;
		if (acl) free(acl);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
    
    return;
}
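
A hypothetical driver for DoAQOfflineRender; both file paths below are placeholders:

// Hypothetical usage -- the paths are placeholders, not from the original sample
const char *src = "/tmp/source.m4a";
const char *dst = "/tmp/rendered.caf";
CFURLRef sourceURL      = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (const UInt8 *)src, strlen(src), false);
CFURLRef destinationURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (const UInt8 *)dst, strlen(dst), false);
DoAQOfflineRender(sourceURL, destinationURL);
CFRelease(sourceURL);
CFRelease(destinationURL);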
Ejemplo n.º 18
0
// _______________________________________________________________________________________
//
// called to create the file -- or update its format/channel layout/properties based on an encoder
// setting change
void	CAAudioFile::FileFormatChanged(const FSRef *parentDir, CFStringRef filename, AudioFileTypeID filetype)
{
	LOG_FUNCTION("CAAudioFile::FileFormatChanged", "%p", this);
	XThrowIf(mMode != kPreparingToCreate && mMode != kPreparingToWrite, kExtAudioFileError_InvalidOperationOrder, "new file not prepared");

	UInt32 propertySize;
	OSStatus err;
	AudioStreamBasicDescription saveFileDataFormat = mFileDataFormat;

#if VERBOSE_CONVERTER
	mFileDataFormat.PrintFormat(stdout, "", "Specified file data format");
#endif

	// Find out the actual format the converter will produce. This is necessary in
	// case the bitrate has forced a lower sample rate, which needs to be set correctly
	// in the stream description passed to AudioFileCreate.
	if (mConverter != NULL) {
		propertySize = sizeof(AudioStreamBasicDescription);
		Float64 origSampleRate = mFileDataFormat.mSampleRate;
		XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterCurrentOutputStreamDescription, &propertySize, &mFileDataFormat), "get audio converter's output stream description");
		// do the same for the channel layout being output by the converter
#if VERBOSE_CONVERTER
		mFileDataFormat.PrintFormat(stdout, "", "Converter output");
#endif
		if (fiszero(mFileDataFormat.mSampleRate))
			mFileDataFormat.mSampleRate = origSampleRate;
		err = AudioConverterGetPropertyInfo(mConverter, kAudioConverterOutputChannelLayout, &propertySize, NULL);
		if (err == noErr && propertySize > 0) {
			AudioChannelLayout *layout = static_cast<AudioChannelLayout *>(malloc(propertySize));
			err = AudioConverterGetProperty(mConverter, kAudioConverterOutputChannelLayout, &propertySize, layout);
			if (err) {
				free(layout);
				XThrow(err, "couldn't get audio converter's output channel layout");
			}
			mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
			printf("got new file's channel layout from converter: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
			free(layout);
		}
	}

	// create the output file
	if (mMode == kPreparingToCreate) {
		CAStreamBasicDescription newFileDataFormat = mFileDataFormat;
		if (fiszero(newFileDataFormat.mSampleRate))
			newFileDataFormat.mSampleRate = 44100;	// just make something up for now
#if VERBOSE_CONVERTER
		newFileDataFormat.PrintFormat(stdout, "", "Applied to new file");
#endif
		XThrowIfError(AudioFileCreate(parentDir, filename, filetype, &newFileDataFormat, 0, &mFSRef, &mAudioFile), "create audio file");
		mMode = kPreparingToWrite;
		mOwnOpenFile = true;
	} else if (saveFileDataFormat != mFileDataFormat || fnotequal(saveFileDataFormat.mSampleRate, mFileDataFormat.mSampleRate)) {
		// second check must be explicit since operator== on ASBD treats SR of zero as "don't care"
		if (fiszero(mFileDataFormat.mSampleRate))
			mFileDataFormat.mSampleRate = mClientDataFormat.mSampleRate;
#if VERBOSE_CONVERTER
		mFileDataFormat.PrintFormat(stdout, "", "Applied to new file");
#endif
		XThrowIf(fiszero(mFileDataFormat.mSampleRate), kExtAudioFileError_InvalidDataFormat, "file's sample rate is 0");
		XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyDataFormat, sizeof(AudioStreamBasicDescription), &mFileDataFormat), "couldn't update file's data format");
	}

	UInt32 deferSizeUpdates = 1;
	err = AudioFileSetProperty(mAudioFile, kAudioFilePropertyDeferSizeUpdates, sizeof(UInt32), &deferSizeUpdates);
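	// deferring size updates postpones the header's size bookkeeping until the
	// file is closed, sparing a seek on every write; failure here is non-fatal
	// and deliberately ignored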

	if (mConverter != NULL) {
		// encoder
		// get the magic cookie, if any, from the converter
		delete[] mMagicCookie;	mMagicCookie = NULL;
		mMagicCookieSize = 0;

		err = AudioConverterGetPropertyInfo(mConverter, kAudioConverterCompressionMagicCookie, &propertySize, NULL);

		// we can get a noErr result and also a propertySize == 0
		// -- if the file format does support magic cookies, but this file doesn't have one.
		if (err == noErr && propertySize > 0) {
			mMagicCookie = new Byte[propertySize];
			XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterCompressionMagicCookie, &propertySize, mMagicCookie), "get audio converter's magic cookie");
			mMagicCookieSize = propertySize;	// the converter lies and tells us the wrong size
			// now set the magic cookie on the output file
			UInt32 willEatTheCookie = false;
			// the converter wants to give us one; will the file take it?
			err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyMagicCookieData,
					NULL, &willEatTheCookie);
			if (err == noErr && willEatTheCookie) {
#if VERBOSE_CONVERTER
				printf("Setting cookie on encoded file\n");
#endif
				XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyMagicCookieData, mMagicCookieSize, mMagicCookie), "set audio file's magic cookie");
			}
		}

		// get maximum packet size
		propertySize = sizeof(UInt32);
		XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterPropertyMaximumOutputPacketSize, &propertySize, &mFileMaxPacketSize), "get audio converter's maximum output packet size");

		AllocateBuffers(true /* okToFail */);
	} else {
		InitFileMaxPacketSize();
	}

	if (mFileChannelLayout.IsValid() && mFileChannelLayout.NumberChannels() > 2) {
		// don't bother tagging mono/stereo files
		UInt32 isWritable;
		err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, NULL, &isWritable);
		if (!err && isWritable) {
#if VERBOSE_CHANNELMAP
			printf("writing file's channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
			err = AudioFileSetProperty(mAudioFile, kAudioFilePropertyChannelLayout,
				mFileChannelLayout.Size(), &mFileChannelLayout.Layout());
			if (err)
				CAXException::Warning("could not set the file's channel layout", err);
		} else {
#if VERBOSE_CHANNELMAP
			printf("file won't accept a channel layout (write)\n");
#endif
		}
	}

	UpdateClientMaxPacketSize();	// also sets mFrame0Offset
	mPacketMark = 0;
	mFrameMark = 0;
}
Ejemplo n.º 19
0
    bool load(CFURLRef url) {

        OSStatus status;
        memset(&aqData,0,sizeof(aqData));
        timeBase = 0;
        
        status = AudioFileOpenURL(url,kAudioFileReadPermission,0,&aqData.mAudioFile);
        checkStatus(status);
        if( status != noErr ) return false;
        
        UInt32 dataFormatSize = sizeof (aqData.mDataFormat);    // 1

        status = AudioFileGetProperty (                                  // 2
            aqData.mAudioFile,                                  // 3
            kAudioFilePropertyDataFormat,                       // 4
            &dataFormatSize,                                    // 5
            &aqData.mDataFormat                                 // 6
        );
        checkStatus(status);

        status = AudioQueueNewOutput (                                // 1
            &aqData.mDataFormat,                             // 2
            HandleOutputBuffer,                              // 3
            &aqData,                                         // 4
            CFRunLoopGetCurrent (),                          // 5
            kCFRunLoopCommonModes,                           // 6
            0,                                               // 7
            &aqData.mQueue                                   // 8
        );
        checkStatus(status);

        UInt32 maxPacketSize;
        UInt32 propertySize = sizeof (maxPacketSize);
        status = AudioFileGetProperty (                               // 1
            aqData.mAudioFile,                               // 2
            kAudioFilePropertyPacketSizeUpperBound,          // 3
            &propertySize,                                   // 4
            &maxPacketSize                                   // 5
        );
        checkStatus(status);

        deriveBufferSize (                                   // 6
            aqData.mDataFormat,                              // 7
            maxPacketSize,                                   // 8
            0.5,                                             // 9
            &aqData.bufferByteSize,                          // 10
            &aqData.mNumPacketsToRead                        // 11
        );
        
        bool isFormatVBR = (                                       // 1
            aqData.mDataFormat.mBytesPerPacket == 0 ||
            aqData.mDataFormat.mFramesPerPacket == 0
        );

        if (isFormatVBR) {                                         // 2
            aqData.mPacketDescs =
              (AudioStreamPacketDescription*) malloc (
                aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription)
              );
        } else {                                                   // 3
            aqData.mPacketDescs = NULL;
        }

        UInt32 cookieSize = sizeof (UInt32);                   // 1
        OSStatus couldNotGetProperty =                             // 2
            AudioFileGetPropertyInfo (                         // 3
                aqData.mAudioFile,                             // 4
                kAudioFilePropertyMagicCookieData,             // 5
                &cookieSize,                                   // 6
                NULL                                           // 7
            );
    //    checkStatus(couldNotGetProperty);
        if (!couldNotGetProperty && cookieSize) {              // 8
            char* magicCookie =
                (char *) malloc (cookieSize);

            status = AudioFileGetProperty (                             // 9
                aqData.mAudioFile,                             // 10
                kAudioFilePropertyMagicCookieData,             // 11
                &cookieSize,                                   // 12
                magicCookie                                    // 13
            );
            checkStatus(status);

            status = AudioQueueSetProperty (                            // 14
                aqData.mQueue,                                 // 15
                kAudioQueueProperty_MagicCookie,               // 16
                magicCookie,                                   // 17
                cookieSize                                     // 18
            );
            checkStatus(status);

            free (magicCookie);                                // 19
        }

        return true;
    }
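
deriveBufferSize() is referenced above but not shown; Apple's Audio Queue Services Programming Guide gives an implementation along these lines (CalculateBytesForTime in the earlier examples follows the same pattern, just with different bounds):

// Sizes a queue buffer for roughly 'seconds' of audio, clamped to sane bounds
void deriveBufferSize(AudioStreamBasicDescription &ASBDesc,
                      UInt32   maxPacketSize,
                      Float64  seconds,
                      UInt32   *outBufferSize,
                      UInt32   *outNumPacketsToRead)
{
    static const UInt32 maxBufferSize = 0x50000;   // 320 KB upper bound
    static const UInt32 minBufferSize = 0x4000;    // 16 KB lower bound

    if (ASBDesc.mFramesPerPacket != 0) {
        // packets have a fixed duration, so we can size for a time interval
        Float64 numPacketsForTime = ASBDesc.mSampleRate / ASBDesc.mFramesPerPacket * seconds;
        *outBufferSize = (UInt32)(numPacketsForTime * maxPacketSize);
    } else {
        // packet duration is unknown; fall back to the larger of the defaults
        *outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
    }

    if (*outBufferSize > maxBufferSize && *outBufferSize > maxPacketSize)
        *outBufferSize = maxBufferSize;
    else if (*outBufferSize < minBufferSize)
        *outBufferSize = minBufferSize;

    *outNumPacketsToRead = *outBufferSize / maxPacketSize;
}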
bool AudioQueueStreamOut::Open(const char *FileName)
{
    delete [] mInfo.mPacketDescs;
    mInfo.mPacketDescs = NULL;
    m_totalFrames = 0;
    mInfo.m_SeekToPacket = -1;
 	try {
   
        CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)FileName, strlen(FileName), false);
        if (!sndFile) return false;
            
        OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &mInfo.mAudioFile);
        CFRelease (sndFile);
        XThrowIfError(result, "AudioFileOpenURL failed");
                                
        UInt32 size = sizeof(mInfo.mDataFormat);
        XThrowIfError(AudioFileGetProperty(mInfo.mAudioFile, 
                                    kAudioFilePropertyDataFormat, &size, &mInfo.mDataFormat), "couldn't get file's data format");
        
        printf ("File format: "); mInfo.mDataFormat.Print();

        XThrowIfError(AudioQueueNewOutput(&mInfo.mDataFormat, AudioQueueStreamOut::AQBufferCallback, this, 
                                    NULL, kCFRunLoopCommonModes, 0, &mInfo.mQueue), "AudioQueueNew failed");

        UInt32 bufferByteSize;
        
        // we need to calculate how many packets we read at a time, and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (mInfo.mDataFormat.mBytesPerPacket == 0 || mInfo.mDataFormat.mFramesPerPacket == 0);
            
            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(mInfo.mAudioFile, 
                                    kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
            
            // adjust buffer size to represent about a half second of audio based on this format
            CalculateBytesForTime (mInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &mInfo.mNumPacketsToRead);
            
            if (isFormatVBR)
                mInfo.mPacketDescs = new AudioStreamPacketDescription [mInfo.mNumPacketsToRead];
            else
                mInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                
            printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)mInfo.mNumPacketsToRead);
        }

        // (2) If the file has a cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        result = AudioFileGetPropertyInfo (mInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char [size];		
            XThrowIfError (AudioFileGetProperty (mInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(mInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

        // prime the queue with some data before starting
        mInfo.mDone = false;
        mInfo.mCurrentPacket = 0;
        for (UInt32 i = 0; i < sizeof(mInfo.mBuffers)/sizeof(mInfo.mBuffers[0]); ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mInfo.mQueue, bufferByteSize, &mInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

            AQBufferCallback (this, mInfo.mQueue, mInfo.mBuffers[i]);
            
            if (mInfo.mDone) break;
        }	
        return IMUSIKStreamOutDefault::Create(NULL);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
    
    return false;
}
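
None of the examples above include the output-buffer callback they register (AQTestBufferCallback, AQBufferCallback, HandleOutputBuffer). A minimal sketch of the common shape, assuming a state struct that mirrors the AQTestInfo/aqData fields used above (PlayerState and its members are assumed names):

#include <AudioToolbox/AudioToolbox.h>

struct PlayerState {
    AudioFileID                   mAudioFile;
    AudioStreamPacketDescription *mPacketDescs;      // NULL for CBR formats
    SInt64                        mCurrentPacket;
    UInt32                        mNumPacketsToRead;
    bool                          mDone;
};

static void HandleOutputBuffer(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer)
{
    PlayerState *state = static_cast<PlayerState *>(inUserData);
    if (state->mDone)
        return;

    // read the next run of packets from the file into the queue buffer
    UInt32 numBytes   = inBuffer->mAudioDataBytesCapacity;
    UInt32 numPackets = state->mNumPacketsToRead;
    OSStatus err = AudioFileReadPacketData(state->mAudioFile, false, &numBytes,
                                           state->mPacketDescs, state->mCurrentPacket,
                                           &numPackets, inBuffer->mAudioData);

    if (err == noErr && numPackets > 0) {
        inBuffer->mAudioDataByteSize = numBytes;
        AudioQueueEnqueueBuffer(inAQ, inBuffer,
                                state->mPacketDescs ? numPackets : 0,
                                state->mPacketDescs);
        state->mCurrentPacket += numPackets;
    }
    else {
        // out of data: let the already-enqueued audio drain, then mark done
        AudioQueueStop(inAQ, false);
        state->mDone = true;
    }
}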