Code example #1
File: HLAudioFile.cpp  Project: fruitsamples/HAL
void	HLAudioFile::ReadAudioBytes(SInt64 inOffset, UInt32& ioNumberBytes, void* outData, bool inCache)
{
	ThrowIf(mAudioFileID == 0, CAException(fnOpnErr), "HLAudioFile::ReadAudioBytes: file isn't prepared");
	
	OSStatus theError = AudioFileReadBytes(mAudioFileID, inCache, inOffset, &ioNumberBytes, outData);
	ThrowIfError(theError, CAException(theError), "HLAudioFile::ReadAudioBytes: couldn't read the data");
}
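For context, a call through this wrapper might look like the following sketch (hypothetical; it assumes an already-opened HLAudioFile instance named theFile, and errors surface as CAException per the ThrowIf calls above):

	UInt32	theByteCount = 4096;
	char	theBuffer[4096];
	// On return, theByteCount holds the number of bytes actually read,
	// which can be less than requested near the end of the file.
	theFile.ReadAudioBytes(0, theByteCount, theBuffer, false);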
Code example #2
File: SoundEngine.cpp  Project: melling/Sierpinski
		OSStatus LoadFileData(const char *inFilePath, void* &outData, UInt32 &outDataSize, ALuint &outBufferID)
		{
			AudioFileID theAFID = 0;
			OSStatus result = noErr;
			UInt64 theFileSize = 0;
			AudioStreamBasicDescription theFileFormat;
			
			result = LoadFileDataInfo(inFilePath, theAFID, theFileFormat, theFileSize);
				AssertNoError("Error loading file info", fail)
			outDataSize = (UInt32)theFileSize;

			outData = malloc(outDataSize);

			result = AudioFileReadBytes(theAFID, false, 0, &outDataSize, outData);
				AssertNoError("Error reading file data", fail)
				
			if (!TestAudioFormatNativeEndian(theFileFormat) && (theFileFormat.mBitsPerChannel > 8))
			{
				// bail through the cleanup path rather than returning directly,
				// which would leak theAFID and the malloc'd outData
				result = kSoundEngineErrInvalidFileFormat;
				goto fail;
			}
		
			alGenBuffers(1, &outBufferID);
				AssertNoOALError("Error generating buffer\n", fail);
			
			alBufferDataStaticProc(outBufferID, GetALFormat(theFileFormat), outData, outDataSize, theFileFormat.mSampleRate);
				AssertNoOALError("Error attaching data to buffer\n", fail);

			AudioFileClose(theAFID);
			return result;
			
		fail:			
			if (theAFID)
				AudioFileClose(theAFID);
			if (outData)
			{
				free(outData);
				outData = NULL;
			}
			return result;
		}
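A caller of this helper owns both the malloc'd data and the generated OpenAL buffer. A minimal usage sketch (hypothetical path and source variable) follows; because alBufferDataStaticProc hands OpenAL the app's own memory rather than copying it, outData must stay allocated for the buffer's lifetime:

	void*	theData = NULL;
	UInt32	theDataSize = 0;
	ALuint	theBufferID = 0;
	if (LoadFileData("/path/to/sound.caf", theData, theDataSize, theBufferID) == noErr)
	{
		alSourcei(theSource, AL_BUFFER, theBufferID);	// attach to an existing source
		// free(theData) only after the buffer is deleted with alDeleteBuffers()
	}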
Code example #3
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei *outSampleRate)
{
	OSStatus						err = noErr;
	UInt64							fileDataSize = 0;
	AudioStreamBasicDescription		theFileFormat;
	UInt32							thePropertySize = sizeof(theFileFormat);
	AudioFileID						afid = 0;
	void*							theData = NULL;
	
	// Open the audio file with AudioFileOpenURL()
	err = AudioFileOpenURL(inFileURL, kAudioFileReadPermission, 0, &afid);
	if(err) { printf("MyGetOpenALAudioData: AudioFileOpenURL FAILED, Error = %ld\n", err); goto Exit; }
	
	// Get the audio data format
	err = AudioFileGetProperty(afid, kAudioFilePropertyDataFormat, &thePropertySize, &theFileFormat);
	if(err) { printf("MyGetOpenALAudioData: AudioFileGetProperty(kAudioFileProperty_DataFormat) FAILED, Error = %ld\n", err); goto Exit; }
	
	if (theFileFormat.mChannelsPerFrame > 2)  {
		printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit;
	}
	
	if ((theFileFormat.mFormatID != kAudioFormatLinearPCM) || (!TestAudioFormatNativeEndian(theFileFormat))) {
		printf("MyGetOpenALAudioData - Unsupported Format, must be little-endian PCM\n"); goto Exit;
	}
	
	if ((theFileFormat.mBitsPerChannel != 8) && (theFileFormat.mBitsPerChannel != 16)) {
		printf("MyGetOpenALAudioData - Unsupported Format, must be 8 or 16 bit PCM\n"); goto Exit;
	}
	
	
	thePropertySize = sizeof(fileDataSize);
	err = AudioFileGetProperty(afid, kAudioFilePropertyAudioDataByteCount, &thePropertySize, &fileDataSize);
	if(err) { printf("MyGetOpenALAudioData: AudioFileGetProperty(kAudioFilePropertyAudioDataByteCount) FAILED, Error = %ld\n", err); goto Exit; }
	
	// Read all the data into memory
	UInt32		dataSize = (UInt32)fileDataSize;
	theData = malloc(dataSize);
	if (theData)
	{
		err = AudioFileReadBytes(afid, false, 0, &dataSize, theData);
		if(err == noErr)
		{
			// success
			*outDataSize = (ALsizei)dataSize;
			*outDataFormat = (theFileFormat.mChannelsPerFrame > 1) ?
				((theFileFormat.mBitsPerChannel == 16) ? AL_FORMAT_STEREO16 : AL_FORMAT_STEREO8) :
				((theFileFormat.mBitsPerChannel == 16) ? AL_FORMAT_MONO16 : AL_FORMAT_MONO8);	// honor the 8-bit case admitted above
			*outSampleRate = (ALsizei)theFileFormat.mSampleRate;
		}
		else
		{
			// failure
			free (theData);
			theData = NULL; // make sure to return NULL
			printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", err); goto Exit;
		}
	}
	
Exit:
	// Close the AudioFileID, it is no longer needed
	if (afid) AudioFileClose(afid);
	return theData;
}
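Typical usage hands the returned block to alBufferData(), which copies the bytes, and then frees the local copy right away (a minimal sketch; theFileURL is assumed to be a valid CFURLRef):

	ALsizei	theSize, theFreq;
	ALenum	theFormat;
	void*	theData = MyGetOpenALAudioData(theFileURL, &theSize, &theFormat, &theFreq);
	if (theData != NULL)
	{
		ALuint	theBufferID;
		alGenBuffers(1, &theBufferID);
		alBufferData(theBufferID, theFormat, theData, theSize, theFreq);	// OpenAL copies the data here
		free(theData);	// so the local copy can be released immediately
	}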
Code example #4
File: AudioFile.cpp  Project: osoumen/C700
//-----------------------------------------------------------------------------
bool AudioFile::Load()
{
#if MAC
	AudioFileID mAudioFileID;
    AudioStreamBasicDescription fileDescription, outputFormat;
    SInt64 dataSize64;
    UInt32 dataSize;
	
	OSStatus err;
	UInt32 size;
	
    // Open the file
	FSRef	ref;
	Boolean	isDirectory=false;
	FSPathMakeRef((const UInt8*)GetFilePath(), &ref, &isDirectory);
	
	err = AudioFileOpen(&ref, fsRdPerm, 0, &mAudioFileID);
    if (err) {
        //NSLog(@"AudioFileOpen failed");
        return false;
    }
	
    // Get the opened file's basic format info into fileDescription
    size = sizeof(AudioStreamBasicDescription);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, 
							   &size, &fileDescription);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
    // Get the byte count of the opened file's data section into dataSize
    size = sizeof(SInt64);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, 
							   &size, &dataSize64);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	dataSize = static_cast<UInt32>(dataSize64);
	
	AudioFileTypeID	fileTypeID;
	size = sizeof( AudioFileTypeID );
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFileFormat, &size, &fileTypeID);
	if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;
	
	// Get the loop points
	Float64		st_point=0.0,end_point=0.0;
	if ( fileTypeID == kAudioFileAIFFType || fileTypeID == kAudioFileAIFCType ) {
		// Get the INST chunk
		AudioFileGetUserDataSize(mAudioFileID, 'INST', 0, &size);
		if ( size > 4 ) {
			UInt8	*instChunk = new UInt8[size];
			AudioFileGetUserData(mAudioFileID, 'INST', 0, &size, instChunk);
			
			// Get the MIDI info
			mInstData.basekey = instChunk[0];
			mInstData.lowkey = instChunk[2];
			mInstData.highkey = instChunk[3];
			
			if ( instChunk[9] > 0 ) {	// check the loop flag
				// Get the markers
				UInt32	writable;
				err = AudioFileGetPropertyInfo(mAudioFileID, kAudioFilePropertyMarkerList,
											   &size, &writable);
				if (err) {
					//NSLog(@"AudioFileGetPropertyInfo failed");
					AudioFileClose(mAudioFileID);
					delete [] instChunk;
					return false;
				}
				UInt8	*markersBuffer = new UInt8[size];
				AudioFileMarkerList	*markers = reinterpret_cast<AudioFileMarkerList*>(markersBuffer);
				
				err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyMarkerList, 
										   &size, markers);
				if (err) {
					//NSLog(@"AudioFileGetProperty failed");
					AudioFileClose(mAudioFileID);
					delete [] markersBuffer;
					delete [] instChunk;
					return false;
				}
				
				// Set the loop points
				for (unsigned int i=0; i<markers->mNumberMarkers; i++) {
					if (markers->mMarkers[i].mMarkerID == instChunk[11] ) {
						st_point = markers->mMarkers[i].mFramePosition;
					}
					else if (markers->mMarkers[i].mMarkerID == instChunk[13] ) {
						end_point = markers->mMarkers[i].mFramePosition;
					}
					if (markers->mMarkers[i].mName != NULL) {	// mName can be NULL, and CFRelease(NULL) crashes
						CFRelease(markers->mMarkers[i].mName);
					}
				}
				if ( st_point < end_point ) {
					mInstData.loop = 1;
				}
				delete [] markersBuffer;
			}
			delete [] instChunk;
		}
		
	}
	else if ( fileTypeID == kAudioFileWAVEType ) {
		// Get the smpl chunk
		AudioFileGetUserDataSize( mAudioFileID, 'smpl', 0, &size );
		if ( size >= sizeof(WAV_smpl) ) {
			UInt8	*smplChunk = new UInt8[size];
			AudioFileGetUserData( mAudioFileID, 'smpl', 0, &size, smplChunk );
			WAV_smpl	*smpl = (WAV_smpl *)smplChunk;
			
			smpl->loops = EndianU32_LtoN( smpl->loops );
			
			if ( smpl->loops > 0 ) {
				mInstData.loop = true;
				mInstData.basekey = EndianU32_LtoN( smpl->note );
				st_point = EndianU32_LtoN( smpl->start );
				end_point = EndianU32_LtoN( smpl->end ) + 1;	// SoundForge and others treat the final point as inclusive
				//end_point = EndianU32_LtoN( smpl->end );	// Peak, for some reason, matches AIFF here
			}
			else {
				mInstData.basekey = EndianU32_LtoN( smpl->note );
			}
			delete [] smplChunk;
		}
	}
	
	// Limit the data size
	SInt64	dataSamples = dataSize / fileDescription.mBytesPerFrame;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * fileDescription.mBytesPerFrame;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
    // Allocate temporary memory for reading the waveform
    char *fileBuffer;
	unsigned int	fileBufferSize;
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*fileDescription.mBytesPerFrame;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	err = AudioFileReadBytes(mAudioFileID, false, 0, &dataSize, fileBuffer);
    if (err) {
        //NSLog(@"AudioFileReadBytes failed");
        AudioFileClose(mAudioFileID);
        delete [] fileBuffer;
        return false;
    }
    AudioFileClose(mAudioFileID);
	
    // Unroll the loop region
    Float64	adjustment = 1.0;
    outputFormat=fileDescription;
	if (mInstData.loop) {
		UInt32	plusalpha=0, framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = (end_point-st_point) > (EXPAND_BUFFER-plusalpha) ?
						   (EXPAND_BUFFER-plusalpha) : (end_point-st_point);
			memcpy(fileBuffer+((int)end_point+plusalpha)*fileDescription.mBytesPerFrame,
				   fileBuffer+(int)st_point*fileDescription.mBytesPerFrame,
				   framestocopy*fileDescription.mBytesPerFrame);
			plusalpha += framestocopy;
		}
		dataSize += plusalpha*fileDescription.mBytesPerFrame;
		
		// Snap to a 16-sample boundary
		adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		st_point *= adjustment;
		end_point *= adjustment;
	}
	outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
	outputFormat.mChannelsPerFrame = 1;
	outputFormat.mBytesPerFrame = sizeof(float);
	outputFormat.mBitsPerChannel = 32;
	outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame;
	
    // Set up a converter for the byte-order conversion
    AudioConverterRef converter;
	err = AudioConverterNew(&fileDescription, &outputFormat, &converter);
    if (err) {
        //NSLog(@"AudioConverterNew failed");
        delete [] fileBuffer;
        return false;
    }
	
	// Set sample-rate conversion quality to maximum
//	if (fileDescription.mSampleRate != outputFormat.mSampleRate) {
//		size = sizeof(UInt32);
//		UInt32	setProp = kAudioConverterQuality_Max;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterQuality,
//								  size, &setProp);
//        
//        size = sizeof(UInt32);
//		setProp = kAudioConverterSampleRateConverterComplexity_Mastering;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterComplexity,
//								  size, &setProp);
//        
//	}
	
    // Get a buffer size sufficient for the output
	UInt32	outputSize = dataSize;
	size = sizeof(UInt32);
	err = AudioConverterGetProperty(converter, kAudioConverterPropertyCalculateOutputBufferSize, 
									&size, &outputSize);
	if (err) {
		//NSLog(@"AudioConverterGetProperty failed");
		delete [] fileBuffer;
		AudioConverterDispose(converter);
        return false;
	}
    UInt32 monoSamples = outputSize/sizeof(float);
    
    // Byte-order conversion
    float *monoData = new float[monoSamples];
	err = AudioConverterConvertBuffer(converter, dataSize, fileBuffer,
								&outputSize, monoData);
    if (err || outputSize == 0) {
        //NSLog(@"AudioConverterConvertBuffer failed");
        delete [] monoData;
        delete [] fileBuffer;
        AudioConverterDispose(converter);
        return false;
    }
    
    // If the loop length is not a multiple of 16, convert the sample rate
    Float64 inputSampleRate = fileDescription.mSampleRate;
    Float64 outputSampleRate = fileDescription.mSampleRate * adjustment;
    int	outSamples = monoSamples;
    if ( outputSampleRate == inputSampleRate ) {
        m_pAudioData = new short[monoSamples];
        for (int i=0; i<monoSamples; i++) {
            m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
        }
    }
    else {
        outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
        m_pAudioData = new short[outSamples];
        resampling(monoData, monoSamples, inputSampleRate,
                   m_pAudioData, &outSamples, outputSampleRate);
    }
    
    // Clean up
    delete [] monoData;
    delete [] fileBuffer;
    AudioConverterDispose(converter);
	
	// Set the instrument data
	if ( st_point > MAXIMUM_SAMPLES ) {
		mInstData.lp = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp			= st_point;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		mInstData.lp_end = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp_end		= end_point;
	}
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;
	
	mIsLoaded = true;
	
	return true;
#else
	// Windows audio file loading path

	// Open the file
	HMMIO	hmio = NULL;
	MMRESULT	err;
	DWORD		size;

	hmio = mmioOpen( mPath, NULL, MMIO_READ );
	if ( !hmio ) {
		return false;
	}
	
	// Find the RIFF chunk
	MMCKINFO	riffChunkInfo;
	riffChunkInfo.fccType = mmioFOURCC('W', 'A', 'V', 'E');
	err = mmioDescend( hmio, &riffChunkInfo, NULL, MMIO_FINDRIFF );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( (riffChunkInfo.ckid != FOURCC_RIFF) || (riffChunkInfo.fccType != mmioFOURCC('W', 'A', 'V', 'E') ) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Find the format chunk
	MMCKINFO	formatChunkInfo;
	formatChunkInfo.ckid = mmioFOURCC('f', 'm', 't', ' ');
	err = mmioDescend( hmio, &formatChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( formatChunkInfo.cksize < sizeof(PCMWAVEFORMAT) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Get the format info
	WAVEFORMATEX	pcmWaveFormat;
	DWORD			fmsize = (formatChunkInfo.cksize > sizeof(WAVEFORMATEX)) ? sizeof(WAVEFORMATEX):formatChunkInfo.cksize;
	size = mmioRead( hmio, (HPSTR)&pcmWaveFormat, fmsize );
	if ( size != fmsize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( pcmWaveFormat.wFormatTag != WAVE_FORMAT_PCM ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;

	// Find the smpl chunk
	MMCKINFO	smplChunkInfo;
	smplChunkInfo.ckid = mmioFOURCC('s', 'm', 'p', 'l');
	err = mmioDescend( hmio, &smplChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		smplChunkInfo.cksize = 0;
	}
	double	st_point=0.0;
	double	end_point=0.0;
	if ( smplChunkInfo.cksize >= sizeof(WAV_smpl) ) {
		// Get the loop points
		unsigned char	*smplChunk = new unsigned char[smplChunkInfo.cksize];
		size = mmioRead(hmio,(HPSTR)smplChunk, smplChunkInfo.cksize);
		WAV_smpl	*smpl = (WAV_smpl *)smplChunk;

		if ( smpl->loops > 0 ) {
			mInstData.loop = 1;
			mInstData.basekey = smpl->note;
			st_point = smpl->start;
			end_point = smpl->end + 1;	// SoundForge and others treat the final point as inclusive
		}
		else {
			mInstData.basekey = smpl->note;
		}
		delete [] smplChunk;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Find the data chunk
	MMCKINFO dataChunkInfo;
	dataChunkInfo.ckid = mmioFOURCC('d', 'a', 't', 'a');
	err = mmioDescend( hmio, &dataChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Allocate temporary memory for reading the waveform
	unsigned int	dataSize = dataChunkInfo.cksize;
	int				bytesPerSample = pcmWaveFormat.nBlockAlign;
	char			*fileBuffer;
	unsigned int	fileBufferSize;

	// Limit the data size
	int	dataSamples = dataSize / pcmWaveFormat.nBlockAlign;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * pcmWaveFormat.nBlockAlign;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
	
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*bytesPerSample;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	size = mmioRead(hmio, (HPSTR)fileBuffer, dataSize);
	if ( size != dataSize ) {
		delete [] fileBuffer;	// don't leak the buffer on a short read
		mmioClose( hmio, 0 );
		return false;
	}
	mmioClose(hmio,0);

	// Unroll the loop region
	double	inputSampleRate = pcmWaveFormat.nSamplesPerSec;
	double	outputSampleRate = inputSampleRate;
	if (mInstData.loop) {
		unsigned int	plusalpha=0;
		double			framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = (end_point-st_point) > (EXPAND_BUFFER-plusalpha) ?
						   (EXPAND_BUFFER-plusalpha) : (end_point-st_point);
			memcpy(fileBuffer+((int)end_point+plusalpha)*bytesPerSample,
				   fileBuffer+(int)st_point*bytesPerSample,
				   static_cast<size_t>(framestocopy*bytesPerSample));
			plusalpha += static_cast<unsigned int>(framestocopy);
		}
		dataSize += plusalpha*bytesPerSample;
		
		// Snap to a 16-sample boundary
		double	adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		outputSampleRate *= adjustment;
		st_point *= adjustment;
		end_point *= adjustment;
	}

	// First convert to float mono data
	int	bytesPerChannel = bytesPerSample / pcmWaveFormat.nChannels;
	unsigned int	inputPtr = 0;
	unsigned int	outputPtr = 0;
	int				monoSamples = dataSize / bytesPerSample;
	float	range = static_cast<float>((1<<(bytesPerChannel*8-1)) * pcmWaveFormat.nChannels);
	float	*monoData = new float[monoSamples];
	while (inputPtr < dataSize) {
		int	frameSum = 0;
		for (int ch=0; ch<pcmWaveFormat.nChannels; ch++) {
			for (int i=0; i<bytesPerChannel; i++) {
				if (i<bytesPerChannel-1) {
					frameSum += (unsigned char)fileBuffer[inputPtr] << (8*i);
				}
				else {
					frameSum += fileBuffer[inputPtr] << (8*i);
				}
				inputPtr++;
			}
		}
		monoData[outputPtr] = frameSum / range;
		outputPtr++;
	}

	// If the loop length is not a multiple of 16, convert the sample rate
	int	outSamples = monoSamples;
	if ( outputSampleRate == inputSampleRate ) {
		m_pAudioData = new short[monoSamples];
		for (int i=0; i<monoSamples; i++) {
			m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
		}
	}
	else {
		outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
		m_pAudioData = new short[outSamples];
		resampling(monoData, monoSamples, inputSampleRate,
				   m_pAudioData, &outSamples, outputSampleRate);
	}

	// Clean up
	delete [] fileBuffer;
	delete [] monoData;

	// Set the instrument data
	mInstData.lp			= static_cast<int>(st_point);
	mInstData.lp_end		= static_cast<int>(end_point);
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;

	mIsLoaded = true;

	return true;
#endif
}
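Load() pulls its input path from the object itself (GetFilePath() on the Mac branch, mPath on Windows), so usage is construct, point at a file, load. A minimal sketch, with a hypothetical SetFilePath() setter mirroring the GetFilePath() accessor the code calls:

	AudioFile	theFile;
	theFile.SetFilePath("/path/to/sample.wav");	// hypothetical setter paired with GetFilePath()
	if (theFile.Load())
	{
		// m_pAudioData now holds 16-bit mono samples at mInstData.srcSamplerate,
		// with loop points in mInstData.lp / mInstData.lp_end.
	}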
Code example #5
File: oalOSX.cpp  Project: Aye1/RVProject
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ALUTAPI ALvoid ALUTAPIENTRY alutLoadWAVFile(ALbyte *file,ALenum *format,ALvoid **data,ALsizei *size,ALsizei *freq)
{
	OSStatus		err = noErr;
	AudioFileID		audioFile = 0;
	FSRef			fsRef;

	*data = NULL; // in case of failure, do not return an uninitialized value as a bogus address

	if (IsRelativePath(file))
	{
		char			absolutePath[256];
		// we need to make a full path here so FSPathMakeRef() works properly
		MakeAbsolutePath(file, absolutePath, 256);
		// create an fsref from the file parameter
		err = FSPathMakeRef ((const UInt8 *) absolutePath, &fsRef, NULL);
	}
	else
		err = FSPathMakeRef ((const UInt8 *) file, &fsRef, NULL);
	
	if (err == noErr)
	{
		err = AudioFileOpen(&fsRef, fsRdPerm, 0, &audioFile);
		if (err == noErr)
		{
			UInt32							dataSize;
			CAStreamBasicDescription		asbd;
			
			dataSize = sizeof(CAStreamBasicDescription);
			AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &dataSize, &asbd);
			
			*format = GetOALFormatFromASBD(asbd);
			if (IsFormatSupported(*format))
			{
				*freq = (UInt32) asbd.mSampleRate;
				
				SInt64	audioDataSize = 0;
				dataSize = sizeof(audioDataSize);
				err = AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataByteCount, &dataSize, &audioDataSize);
				if (err == noErr)
				{
					*size = (ALsizei)audioDataSize;
					*data = calloc(1, audioDataSize);
					if (*data)
					{
						dataSize = audioDataSize;
						err = AudioFileReadBytes(audioFile, false, 0, &dataSize, *data);
						
						if ((asbd.mFormatID == kAudioFormatLinearPCM) && (asbd.mBitsPerChannel > 8))
						{
							// we just got 16 bit pcm data out of a WAVE file on a big endian platform, so endian swap the data
							AudioConverterRef				converter;
							CAStreamBasicDescription		outFormat = asbd;
							void *							tempData = NULL;
							
							// set format to big endian
							outFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
							// make some place for converted data
							tempData = calloc(1 , audioDataSize);
							
							err = AudioConverterNew(&asbd, &outFormat, &converter);
							if ((err == noErr) && (tempData != NULL))
							{
								UInt32		bufferSize = audioDataSize;
								err = AudioConverterConvertBuffer(converter, audioDataSize, *data, &bufferSize, tempData);
								if (err == noErr)
									memcpy(*data, tempData, audioDataSize);
								AudioConverterDispose(converter);
							}
							if (tempData) free (tempData);
						}
					}
				}
			}
			err = AudioFileClose(audioFile);
		}
	}
}
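A call sketch for this ALUT-style loader (hypothetical file path). Since the implementation zeroes *data up front, NULL is the failure signal, and because the data is allocated with calloc, a plain free() releases it once OpenAL has copied it:

	ALenum	format = 0;
	ALvoid*	data = NULL;
	ALsizei	size = 0, freq = 0;
	alutLoadWAVFile((ALbyte*)"sounds/shot.wav", &format, &data, &size, &freq);
	if (data != NULL)
	{
		ALuint	bufferID;
		alGenBuffers(1, &bufferID);
		alBufferData(bufferID, format, data, size, freq);
		free(data);
	}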
Code example #6
File: main.c  Project: CarlChenCC/examples
static bool LoadWAVFile(const char* filename, ALenum* format, ALvoid** data, ALsizei* size, ALsizei* freq, Float64* estimatedDurationOut)
{
	CFStringRef filenameStr = CFStringCreateWithCString( NULL, filename, kCFStringEncodingUTF8 );
	CFURLRef url = CFURLCreateWithFileSystemPath( NULL, filenameStr, kCFURLPOSIXPathStyle, false );
	CFRelease( filenameStr );
	
	AudioFileID audioFile;
	OSStatus error = AudioFileOpenURL( url, kAudioFileReadPermission, kAudioFileWAVEType, &audioFile );
	CFRelease( url );
	
	if ( error != noErr )
	{
		fprintf( stderr, "Error opening audio file. %d\n", error );
		return false;
	}
	
	AudioStreamBasicDescription basicDescription;
	UInt32 propertySize = sizeof(basicDescription);
	error = AudioFileGetProperty( audioFile, kAudioFilePropertyDataFormat, &propertySize, &basicDescription );
	
	if ( error != noErr )
	{
		fprintf( stderr, "Error reading audio file basic description. %d\n", error );
		AudioFileClose( audioFile );
		return false;
	}
	
	if ( basicDescription.mFormatID != kAudioFormatLinearPCM )
	{
		// Need PCM for OpenAL. WAV files are usually PCM, but the container also
		// allows other codecs, so this check is worth keeping.
		fprintf( stderr, "Audio file is not linear-PCM. %d\n", basicDescription.mFormatID );
		AudioFileClose( audioFile );
		return false;
	}
	
	UInt64 audioDataByteCount = 0;
	propertySize = sizeof(audioDataByteCount);
	error = AudioFileGetProperty( audioFile, kAudioFilePropertyAudioDataByteCount, &propertySize, &audioDataByteCount );
	if ( error != noErr )
	{
		fprintf( stderr, "Error reading audio file byte count. %d\n", error );
		AudioFileClose( audioFile );
		return false;
	}
	
	Float64 estimatedDuration = 0;
	propertySize = sizeof(estimatedDuration);
	error = AudioFileGetProperty( audioFile, kAudioFilePropertyEstimatedDuration, &propertySize, &estimatedDuration );
	if ( error != noErr )
	{
		fprintf( stderr, "Error reading estimated duration of audio file. %d\n", error );
		AudioFileClose( audioFile );
		return false;
	}
	
	ALenum alFormat = 0;
	
	if ( basicDescription.mChannelsPerFrame == 1 )
	{
		if ( basicDescription.mBitsPerChannel == 8 )
			alFormat = AL_FORMAT_MONO8;
		else if ( basicDescription.mBitsPerChannel == 16 )
			alFormat = AL_FORMAT_MONO16;
		else
		{
			fprintf( stderr, "Expected 8 or 16 bits for the mono channel but got %d\n", basicDescription.mBitsPerChannel );
			AudioFileClose( audioFile );
			return false;
		}
		
	}
	else if ( basicDescription.mChannelsPerFrame == 2 )
	{
		if ( basicDescription.mBitsPerChannel == 8 )
			alFormat = AL_FORMAT_STEREO8;
		else if ( basicDescription.mBitsPerChannel == 16 )
			alFormat = AL_FORMAT_STEREO16;
		else
		{
			fprintf( stderr, "Expected 8 or 16 bits per channel but got %d\n", basicDescription.mBitsPerChannel );
			AudioFileClose( audioFile );
			return false;
		}
	}
	else
	{
		fprintf( stderr, "Expected 1 or 2 channels in audio file but got %d\n", basicDescription.mChannelsPerFrame );
		AudioFileClose( audioFile );
		return false;
	}
	
	UInt32 numBytesToRead = (UInt32)audioDataByteCount;
	void* buffer = malloc( numBytesToRead );
	
	if ( buffer == NULL )
	{
		fprintf( stderr, "Error allocating buffer for audio data of size %u\n", numBytesToRead );
		AudioFileClose( audioFile );	// close the file before bailing out
		return false;
	}
	
	error = AudioFileReadBytes( audioFile, false, 0, &numBytesToRead, buffer );
	AudioFileClose( audioFile );
	
	if ( error != noErr )
	{
		fprintf( stderr, "Error reading audio bytes. %d\n", error );
		free(buffer);
		return false;
	}
	
	if ( numBytesToRead != audioDataByteCount )
	{
		fprintf( stderr, "Tried to read %lld bytes from the audio file but only got %d bytes\n", audioDataByteCount, numBytesToRead );
		free(buffer);
		return false;
	}
	
	*freq = (ALsizei)basicDescription.mSampleRate;
	*size = (ALsizei)audioDataByteCount;
	*format = alFormat;
	*data = buffer;
	*estimatedDurationOut = estimatedDuration;
	
	return true;
}
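Consuming this loader follows the same copy-then-free pattern as the examples above, with the estimated duration available as a bonus for scheduling playback (a minimal sketch with hypothetical names):

	ALenum format;
	ALvoid* data;
	ALsizei size, freq;
	Float64 duration;
	if (LoadWAVFile("assets/click.wav", &format, &data, &size, &freq, &duration))
	{
		ALuint buffer;
		alGenBuffers(1, &buffer);
		alBufferData(buffer, format, data, size, freq);	// OpenAL copies the bytes
		free(data);
		printf("Loaded %.2f seconds of audio\n", duration);
	}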