Example #1
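// CoreAudio HAL IOProc (this matches the AudioDeviceIOProc signature): mixes
// the requested packets, then converts them into the device's output buffer.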
OSStatus RageSound_CA::GetData(AudioDeviceID inDevice,
							   const AudioTimeStamp *inNow,
							   const AudioBufferList *inInputData,
							   const AudioTimeStamp *inInputTime,
							   AudioBufferList *outOutputData,
							   const AudioTimeStamp *inOutputTime,
							   void *inClientData)
{
	RageTimer tm;
	RageSound_CA *This = (RageSound_CA *)inClientData;
	AudioBuffer& buf = outOutputData->mBuffers[0];
	UInt32 dataPackets = buf.mDataByteSize >> 3; // 8 bytes per packet
	int64_t decodePos = int64_t(inOutputTime->mSampleTime);
	int64_t now = int64_t(inNow->mSampleTime);
	
	RageTimer tm2;
	int16_t buffer[dataPackets * (kBytesPerPacket >> 1)];
		
	This->Mix(buffer, dataPackets, decodePos, now);
	g_fLastMixTimes[g_iLastMixTimePos] = tm2.GetDeltaTime();
	++g_iLastMixTimePos;
	wrap(g_iLastMixTimePos, NUM_MIX_TIMES);
		
	AudioConverterConvertBuffer(This->mConverter, dataPackets * kBytesPerPacket,
								buffer, &buf.mDataByteSize, buf.mData);
		
	g_fLastIOProcTime = tm.GetDeltaTime();
	++g_iNumIOProcCalls;
	
	return noErr;
}
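Every example on this page funnels through the same three-call pattern, so as a reference point, here is a minimal self-contained sketch (not drawn from any of these projects): AudioConverterNew builds the converter, AudioConverterConvertBuffer performs the conversion, AudioConverterDispose releases it. Note that AudioConverterConvertBuffer is documented as valid only for linear-PCM conversions where input and output share a sample rate; anything involving rate conversion has to go through AudioConverterFillComplexBuffer instead.

#include <AudioToolbox/AudioToolbox.h>

// Sketch: convert interleaved SInt16 PCM to native-endian Float32 PCM with
// the same sample rate and channel count ("one packet in, one packet out").
static OSStatus ConvertInt16ToFloat(const SInt16 *inSamples, UInt32 inFrames,
                                    UInt32 inChannels, Float64 inSampleRate,
                                    Float32 *outSamples)
{
    AudioStreamBasicDescription srcFormat = { 0 };
    srcFormat.mSampleRate       = inSampleRate;
    srcFormat.mFormatID         = kAudioFormatLinearPCM;
    srcFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger
                                | kAudioFormatFlagsNativeEndian
                                | kAudioFormatFlagIsPacked;
    srcFormat.mChannelsPerFrame = inChannels;
    srcFormat.mBitsPerChannel   = 16;
    srcFormat.mFramesPerPacket  = 1;
    srcFormat.mBytesPerFrame    = sizeof(SInt16) * inChannels;
    srcFormat.mBytesPerPacket   = srcFormat.mBytesPerFrame;

    AudioStreamBasicDescription dstFormat = srcFormat;
    dstFormat.mFormatFlags    = kAudioFormatFlagsNativeFloatPacked;
    dstFormat.mBitsPerChannel = 32;
    dstFormat.mBytesPerFrame  = sizeof(Float32) * inChannels;
    dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame;

    AudioConverterRef converter = NULL;
    OSStatus err = AudioConverterNew(&srcFormat, &dstFormat, &converter);
    if (err != noErr)
        return err;

    // The fourth argument is in/out: buffer capacity on entry, bytes written on exit.
    UInt32 outBytes = inFrames * dstFormat.mBytesPerFrame;
    err = AudioConverterConvertBuffer(converter,
                                      inFrames * srcFormat.mBytesPerFrame,
                                      inSamples, &outBytes, outSamples);
    AudioConverterDispose(converter);
    return err;
}

Example #1 above uses the same pattern, except the converter (This->mConverter) is created elsewhere and reused on every IOProc callback — the usual arrangement for real-time code.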
Example #2
static void writeCallback(void *aqData,
						  AudioQueueRef inAQ, AudioQueueBufferRef inBuffer)
{
	AQData *d = (AQData *) aqData;
	OSStatus err;

	int len =
		(d->writeBufferByteSize * d->writeAudioFormat.mSampleRate / 1) /
		d->devicewriteFormat.mSampleRate /
		d->devicewriteFormat.mChannelsPerFrame;

	ms_mutex_lock(&d->mutex);
	if (d->write_started == FALSE) {
		ms_mutex_unlock(&d->mutex);
		return;
	}
	if (d->bufferizer->size >= len) {
#if 0
		UInt32 bsize = d->writeBufferByteSize;
		uint8_t *pData = ms_malloc(len);

		ms_bufferizer_read(d->bufferizer, pData, len);
		err = AudioConverterConvertBuffer(d->writeAudioConverter,
										  len,
										  pData,
										  &bsize, inBuffer->mAudioData);
		if (err != noErr) {
			ms_error("writeCallback: AudioConverterConvertBuffer %d", err);
		}
		ms_free(pData);

		if (bsize != d->writeBufferByteSize)
			ms_warning("d->writeBufferByteSize = %i len = %i bsize = %i",
					   d->writeBufferByteSize, len, bsize);
#else
		ms_bufferizer_read(d->bufferizer, inBuffer->mAudioData, len);
#endif
	} else {
		memset(inBuffer->mAudioData, 0, d->writeBufferByteSize);
	}
	inBuffer->mAudioDataByteSize = d->writeBufferByteSize;

	if (gain_changed_out == true) {
		AudioQueueSetParameter(d->writeQueue,
							   kAudioQueueParam_Volume,
							   gain_volume_out);
		gain_changed_out = false;
	}

	err = AudioQueueEnqueueBuffer(d->writeQueue, inBuffer, 0, NULL);
	if (err != noErr) {
		ms_error("AudioQueueEnqueueBuffer %d", err);
	}
	ms_mutex_unlock(&d->mutex);
}
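The len arithmetic above rescales the fixed device-side buffer size into the number of codec-side bytes to pull from the bufferizer: writeBufferByteSize scaled by the ratio of the codec and device sample rates, divided by the device channel count (the / 1 factor is a no-op). For instance, a 1920-byte device buffer at 48000 Hz stereo with an 8000 Hz codec format yields 1920 * 8000 / 48000 / 2 = 160 bytes. The disabled #if 0 branch performed the format conversion with AudioConverterConvertBuffer; the live branch just copies, which presumably assumes the queue was created with the codec's own format, making len equal to writeBufferByteSize.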
Example #3
static void aq_put(MSFilter * f, mblk_t * m)
{
	AQData *d = (AQData *) f->data;
	ms_mutex_lock(&d->mutex);
	ms_bufferizer_put(d->bufferizer, m);
	ms_mutex_unlock(&d->mutex);

	int len =
		(d->writeBufferByteSize * d->writeAudioFormat.mSampleRate / 1) /
		d->devicewriteFormat.mSampleRate /
		d->devicewriteFormat.mChannelsPerFrame;
	if (d->write_started == FALSE && d->bufferizer->size >= len) {
		AudioQueueBufferRef curbuf = d->writeBuffers[d->curWriteBuffer];
#if 0
		OSStatus err;
		UInt32 bsize = d->writeBufferByteSize;
		uint8_t *pData = ms_malloc(len);

		ms_bufferizer_read(d->bufferizer, pData, len);
		err = AudioConverterConvertBuffer(d->writeAudioConverter,
										  len,
										  pData,
										  &bsize, curbuf->mAudioData);
		if (err != noErr) {
			ms_error("writeCallback: AudioConverterConvertBuffer %d", err);
		}
		ms_free(pData);

		if (bsize != d->writeBufferByteSize)
			ms_warning("d->writeBufferByteSize = %i len = %i bsize = %i",
					   d->writeBufferByteSize, len, bsize);
#else
		ms_bufferizer_read(d->bufferizer, curbuf->mAudioData, len);
#endif
		curbuf->mAudioDataByteSize = d->writeBufferByteSize;
		putWriteAQ(d, d->curWriteBuffer);
		++d->curWriteBuffer;
	}
	if (d->write_started == FALSE
		&& d->curWriteBuffer == kNumberAudioOutDataBuffers - 1) {
		OSStatus err;
		err = AudioQueueStart(d->writeQueue, NULL);	// start time: NULL means start ASAP
		if (err != noErr) {
			ms_error("AudioQueueStart -write- %d", err);
		}
		d->write_started = TRUE;
	}
}
Example #4
JNIEXPORT jint JNICALL Java_com_apple_audio_toolbox_AudioConverter_AudioConverterConvertBuffer
  (JNIEnv *, jclass, jint inAudioConverter, jint inInputDataSize, jint inInputData, jint ioOutputDataSize, jint outOutputData)
{
	return (jint)AudioConverterConvertBuffer((AudioConverterRef)inAudioConverter, (UInt32)inInputDataSize, (void *)inInputData, (UInt32 *)ioOutputDataSize, (void *)outOutputData);
}
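Note that this binding squeezes native pointers and the converter reference through jint, which truncates addresses on 64-bit platforms. A 64-bit-safe variant (hypothetical — the Java-side declaration would have to be regenerated to match) would carry them as jlong:

JNIEXPORT jint JNICALL Java_com_apple_audio_toolbox_AudioConverter_AudioConverterConvertBuffer64
  (JNIEnv *env, jclass clazz, jlong inAudioConverter, jint inInputDataSize, jlong inInputData, jlong ioOutputDataSize, jlong outOutputData)
{
	// jlong is always 64 bits, so round-tripping through uintptr_t is lossless here
	return (jint)AudioConverterConvertBuffer((AudioConverterRef)(uintptr_t)inAudioConverter,
											 (UInt32)inInputDataSize,
											 (void *)(uintptr_t)inInputData,
											 (UInt32 *)(uintptr_t)ioOutputDataSize,
											 (void *)(uintptr_t)outOutputData);
}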
Example #5
//-----------------------------------------------------------------------------
bool AudioFile::Load()
{
#if MAC
	AudioFileID mAudioFileID;
    AudioStreamBasicDescription fileDescription, outputFormat;
    SInt64 dataSize64;
    UInt32 dataSize;
	
	OSStatus err;
	UInt32 size;
	
    // Open the file
	FSRef	ref;
	Boolean	isDirectory=false;
	FSPathMakeRef((const UInt8*)GetFilePath(), &ref, &isDirectory);
	
	err = AudioFileOpen(&ref, fsRdPerm, 0, &mAudioFileID);
    if (err) {
        //NSLog(@"AudioFileOpen failed");
        return false;
    }
	
    // Read the opened file's basic format info into fileDescription
    size = sizeof(AudioStreamBasicDescription);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, 
							   &size, &fileDescription);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
    // Read the byte count of the opened file's audio data into dataSize
    size = sizeof(SInt64);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, 
							   &size, &dataSize64);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	dataSize = static_cast<UInt32>(dataSize64);
	
	AudioFileTypeID	fileTypeID;
	size = sizeof( AudioFileTypeID );
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFileFormat, &size, &fileTypeID);
	if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;
	
	// Get the loop points
	Float64		st_point=0.0,end_point=0.0;
	if ( fileTypeID == kAudioFileAIFFType || fileTypeID == kAudioFileAIFCType ) {
		// Get the INST chunk
		AudioFileGetUserDataSize(mAudioFileID, 'INST', 0, &size);
		if ( size > 4 ) {
			UInt8	*instChunk = new UInt8[size];
			AudioFileGetUserData(mAudioFileID, 'INST', 0, &size, instChunk);
			
			// Get the MIDI info
			mInstData.basekey = instChunk[0];
			mInstData.lowkey = instChunk[2];
			mInstData.highkey = instChunk[3];
			
			if ( instChunk[9] > 0 ) {	// check the loop flag
				// Get the markers
				UInt32	writable;
				err = AudioFileGetPropertyInfo(mAudioFileID, kAudioFilePropertyMarkerList,
											   &size, &writable);
				if (err) {
					//NSLog(@"AudioFileGetPropertyInfo failed");
					AudioFileClose(mAudioFileID);
					delete [] instChunk;
					return false;
				}
				UInt8	*markersBuffer = new UInt8[size];
				AudioFileMarkerList	*markers = reinterpret_cast<AudioFileMarkerList*>(markersBuffer);
				
				err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyMarkerList, 
										   &size, markers);
				if (err) {
					//NSLog(@"AudioFileGetProperty failed");
					AudioFileClose(mAudioFileID);
					delete [] markersBuffer;
					delete [] instChunk;
					return false;
				}
				
				// Set the loop points
				for (unsigned int i=0; i<markers->mNumberMarkers; i++) {
					if (markers->mMarkers[i].mMarkerID == instChunk[11] ) {
						st_point = markers->mMarkers[i].mFramePosition;
					}
					else if (markers->mMarkers[i].mMarkerID == instChunk[13] ) {
						end_point = markers->mMarkers[i].mFramePosition;
					}
					CFRelease(markers->mMarkers[i].mName);
				}
				if ( st_point < end_point ) {
					mInstData.loop = 1;
				}
				delete [] markersBuffer;
			}
			delete [] instChunk;
		}
		
	}
	else if ( fileTypeID == kAudioFileWAVEType ) {
		// Get the smpl chunk
		AudioFileGetUserDataSize( mAudioFileID, 'smpl', 0, &size );
		if ( size >= sizeof(WAV_smpl) ) {
			UInt8	*smplChunk = new UInt8[size];
			AudioFileGetUserData( mAudioFileID, 'smpl', 0, &size, smplChunk );
			WAV_smpl	*smpl = (WAV_smpl *)smplChunk;
			
			smpl->loops = EndianU32_LtoN( smpl->loops );
			
			if ( smpl->loops > 0 ) {
				mInstData.loop = true;
				mInstData.basekey = EndianU32_LtoN( smpl->note );
				st_point = EndianU32_LtoN( smpl->start );
				end_point = EndianU32_LtoN( smpl->end ) + 1;	// SoundForge and others treat the final point as inclusive
				//end_point = EndianU32_LtoN( smpl->end );	// Peak, for some reason, matches the AIFF interpretation
			}
			else {
				mInstData.basekey = EndianU32_LtoN( smpl->note );
			}
			delete [] smplChunk;
		}
	}
	
	// Cap the data size
	SInt64	dataSamples = dataSize / fileDescription.mBytesPerFrame;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * fileDescription.mBytesPerFrame;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
    // Allocate temporary memory for reading the waveform
    char *fileBuffer;
	unsigned int	fileBufferSize;
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*fileDescription.mBytesPerFrame;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	err = AudioFileReadBytes(mAudioFileID, false, 0, &dataSize, fileBuffer);
    if (err) {
        //NSLog(@"AudioFileReadBytes failed");
        AudioFileClose(mAudioFileID);
        delete [] fileBuffer;
        return false;
    }
    AudioFileClose(mAudioFileID);
	
    // Unroll the loop
    Float64	adjustment = 1.0;
    outputFormat=fileDescription;
	if (mInstData.loop) {
		UInt32	plusalpha=0, framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = 
			(end_point-st_point)>(EXPAND_BUFFER-plusalpha)?(EXPAND_BUFFER-plusalpha):end_point-st_point;
			memcpy(fileBuffer+((int)end_point+plusalpha)*fileDescription.mBytesPerFrame,
				   fileBuffer+(int)st_point*fileDescription.mBytesPerFrame,
				   framestocopy*fileDescription.mBytesPerFrame);
			plusalpha += framestocopy;
		}
		dataSize += plusalpha*fileDescription.mBytesPerFrame;
		
		// Fix the loop to a 16-sample boundary
		adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
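		// e.g. for a 1000-frame loop: floor(1000/16) = 62 and 1000/16 = 62.5,
		// so adjustment = 62/62.5 = 0.992; resampling by this factor later in
		// Load() shrinks the loop to 992 frames, an exact multiple of 16.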
		st_point *= adjustment;
		end_point *= adjustment;
	}
	outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
	outputFormat.mChannelsPerFrame = 1;
	outputFormat.mBytesPerFrame = sizeof(float);
	outputFormat.mBitsPerChannel = 32;
	outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame;
	
    // Prepare a converter for the byte-order/format conversion
    AudioConverterRef converter;
	err = AudioConverterNew(&fileDescription, &outputFormat, &converter);
    if (err) {
        //NSLog(@"AudioConverterNew failed");
        delete [] fileBuffer;
        return false;
    }
	
	// Set the sample-rate conversion quality to maximum
//	if (fileDescription.mSampleRate != outputFormat.mSampleRate) {
//		size = sizeof(UInt32);
//		UInt32	setProp = kAudioConverterQuality_Max;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterQuality,
//								  size, &setProp);
//        
//        size = sizeof(UInt32);
//		setProp = kAudioConverterSampleRateConverterComplexity_Mastering;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterComplexity,
//								  size, &setProp);
//        
//	}
	
    // Get a buffer size sufficient for the output
	UInt32	outputSize = dataSize;
	size = sizeof(UInt32);
	err = AudioConverterGetProperty(converter, kAudioConverterPropertyCalculateOutputBufferSize, 
									&size, &outputSize);
	if (err) {
		//NSLog(@"AudioConverterGetProperty failed");
		delete [] fileBuffer;
		AudioConverterDispose(converter);
        return false;
	}
    UInt32 monoSamples = outputSize/sizeof(float);
    
    // Byte-order/format conversion
    float *monoData = new float[monoSamples];
	err = AudioConverterConvertBuffer(converter, dataSize, fileBuffer,
								&outputSize, monoData);
    if (err || outputSize == 0) {
        //NSLog(@"AudioConverterConvertBuffer failed");
        delete [] monoData;
        delete [] fileBuffer;
        AudioConverterDispose(converter);
        return false;
    }
    
    // If the loop length is not a multiple of 16, convert the sample rate
    Float64 inputSampleRate = fileDescription.mSampleRate;
    Float64 outputSampleRate = fileDescription.mSampleRate * adjustment;
    int	outSamples = monoSamples;
    if ( outputSampleRate == inputSampleRate ) {
        m_pAudioData = new short[monoSamples];
        for (int i=0; i<monoSamples; i++) {
            m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
        }
    }
    else {
        outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
        m_pAudioData = new short[outSamples];
        resampling(monoData, monoSamples, inputSampleRate,
                   m_pAudioData, &outSamples, outputSampleRate);
    }
    
    // Clean up
    delete [] monoData;
    delete [] fileBuffer;
    AudioConverterDispose(converter);
	
	// Set the Inst data
	if ( st_point > MAXIMUM_SAMPLES ) {
		mInstData.lp = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp			= st_point;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		mInstData.lp_end = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp_end		= end_point;
	}
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;
	
	mIsLoaded = true;
	
	return true;
#else
	// Windows audio file loading path

	// Open the file
	HMMIO	hmio = NULL;
	MMRESULT	err;
	DWORD		size;

	hmio = mmioOpen( mPath, NULL, MMIO_READ );
	if ( !hmio ) {
		return false;
	}
	
	// Find the RIFF chunk
	MMCKINFO	riffChunkInfo;
	riffChunkInfo.fccType = mmioFOURCC('W', 'A', 'V', 'E');
	err = mmioDescend( hmio, &riffChunkInfo, NULL, MMIO_FINDRIFF );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( (riffChunkInfo.ckid != FOURCC_RIFF) || (riffChunkInfo.fccType != mmioFOURCC('W', 'A', 'V', 'E') ) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Find the format chunk
	MMCKINFO	formatChunkInfo;
	formatChunkInfo.ckid = mmioFOURCC('f', 'm', 't', ' ');
	err = mmioDescend( hmio, &formatChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( formatChunkInfo.cksize < sizeof(PCMWAVEFORMAT) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Get the format info
	WAVEFORMATEX	pcmWaveFormat;
	DWORD			fmsize = (formatChunkInfo.cksize > sizeof(WAVEFORMATEX)) ? sizeof(WAVEFORMATEX):formatChunkInfo.cksize;
	size = mmioRead( hmio, (HPSTR)&pcmWaveFormat, fmsize );
	if ( size != fmsize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( pcmWaveFormat.wFormatTag != WAVE_FORMAT_PCM ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;

	// Find the smpl chunk
	MMCKINFO	smplChunkInfo;
	smplChunkInfo.ckid = mmioFOURCC('s', 'm', 'p', 'l');
	err = mmioDescend( hmio, &smplChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		smplChunkInfo.cksize = 0;
	}
	double	st_point=0.0;
	double	end_point=0.0;
	if ( smplChunkInfo.cksize >= sizeof(WAV_smpl) ) {
		// Get the loop points
		unsigned char	*smplChunk = new unsigned char[smplChunkInfo.cksize];
		size = mmioRead(hmio,(HPSTR)smplChunk, smplChunkInfo.cksize);
		WAV_smpl	*smpl = (WAV_smpl *)smplChunk;

		if ( smpl->loops > 0 ) {
			mInstData.loop = 1;
			mInstData.basekey = smpl->note;
			st_point = smpl->start;
			end_point = smpl->end + 1;	// SoundForge and others treat the final point as inclusive
		}
		else {
			mInstData.basekey = smpl->note;
		}
		delete [] smplChunk;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Find the data chunk
	MMCKINFO dataChunkInfo;
	dataChunkInfo.ckid = mmioFOURCC('d', 'a', 't', 'a');
	err = mmioDescend( hmio, &dataChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Allocate temporary memory for reading the waveform
	unsigned int	dataSize = dataChunkInfo.cksize;
	int				bytesPerSample = pcmWaveFormat.nBlockAlign;
	char			*fileBuffer;
	unsigned int	fileBufferSize;

	// Cap the data size
	int	dataSamples = dataSize / pcmWaveFormat.nBlockAlign;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * pcmWaveFormat.nBlockAlign;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
	
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*bytesPerSample;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	size = mmioRead(hmio, (HPSTR)fileBuffer, dataSize);
	if ( size != dataSize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioClose(hmio,0);

	// Unroll the loop
	double	inputSampleRate = pcmWaveFormat.nSamplesPerSec;
	double	outputSampleRate = inputSampleRate;
	if (mInstData.loop) {
		unsigned int	plusalpha=0;
		double			framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = 
			(end_point-st_point)>(EXPAND_BUFFER-plusalpha)?(EXPAND_BUFFER-plusalpha):end_point-st_point;
			memcpy(fileBuffer+((int)end_point+plusalpha)*bytesPerSample,
				   fileBuffer+(int)st_point*bytesPerSample,
				   static_cast<size_t>(framestocopy*bytesPerSample));
			plusalpha += static_cast<unsigned int>(framestocopy);
		}
		dataSize += plusalpha*bytesPerSample;
		
		// Fix the loop to a 16-sample boundary (see the worked example in the Mac branch)
		double	adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		outputSampleRate *= adjustment;
		st_point *= adjustment;
		end_point *= adjustment;
	}

	// First convert to float mono data
	int	bytesPerChannel = bytesPerSample / pcmWaveFormat.nChannels;
	unsigned int	inputPtr = 0;
	unsigned int	outputPtr = 0;
	int				monoSamples = dataSize / bytesPerSample;
	float	range = static_cast<float>((1<<(bytesPerChannel*8-1)) * pcmWaveFormat.nChannels);
	float	*monoData = new float[monoSamples];
	while (inputPtr < dataSize) {
		int	frameSum = 0;
		for (int ch=0; ch<pcmWaveFormat.nChannels; ch++) {
			for (int i=0; i<bytesPerChannel; i++) {
				if (i<bytesPerChannel-1) {
					frameSum += (unsigned char)fileBuffer[inputPtr] << (8*i);
				}
				else {
					frameSum += fileBuffer[inputPtr] << (8*i);
				}
				inputPtr++;
			}
		}
		monoData[outputPtr] = frameSum / range;
		outputPtr++;
	}

	// If the loop length is not a multiple of 16, convert the sample rate
	int	outSamples = monoSamples;
	if ( outputSampleRate == inputSampleRate ) {
		m_pAudioData = new short[monoSamples];
		for (int i=0; i<monoSamples; i++) {
			m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
		}
	}
	else {
		outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
		m_pAudioData = new short[outSamples];
		resampling(monoData, monoSamples, inputSampleRate,
				   m_pAudioData, &outSamples, outputSampleRate);
	}

	// Clean up
	delete [] fileBuffer;
	delete [] monoData;

	// Set the Inst data
	mInstData.lp			= static_cast<int>(st_point);
	mInstData.lp_end		= static_cast<int>(end_point);
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;

	mIsLoaded = true;

	return true;
#endif
}
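Example #5 is worth noting for how it sizes the destination buffer: instead of computing the output byte count by hand, it feeds the input size into kAudioConverterPropertyCalculateOutputBufferSize and lets the converter report the required capacity before calling AudioConverterConvertBuffer.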
Example #6
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ALUTAPI ALvoid ALUTAPIENTRY alutLoadWAVFile(ALbyte *file,ALenum *format,ALvoid **data,ALsizei *size,ALsizei *freq)
{
	OSStatus		err = noErr;
	AudioFileID		audioFile = 0;
	FSRef			fsRef;

	*data = NULL; // in case of failure, do not return some uninitialized value as a bogus address

	if (IsRelativePath(file))
	{
		char			absolutePath[256];
		// we need to make a full path here so FSPathMakeRef() works properly
		MakeAbsolutePath(file, absolutePath, 256);
		// create an fsref from the file parameter
		err = FSPathMakeRef ((const UInt8 *) absolutePath, &fsRef, NULL);
	}
	else
		err = FSPathMakeRef ((const UInt8 *) file, &fsRef, NULL);
	
	if (err == noErr)
	{
		err = AudioFileOpen(&fsRef, fsRdPerm, 0, &audioFile);
		if (err == noErr)
		{
			UInt32							dataSize;
			CAStreamBasicDescription		asbd;
			
			dataSize = sizeof(CAStreamBasicDescription);
			AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &dataSize, &asbd);
			
			*format = GetOALFormatFromASBD(asbd);
			if (IsFormatSupported(*format))
			{
				*freq = (UInt32) asbd.mSampleRate;
				
				SInt64	audioDataSize = 0;
				dataSize = sizeof(audioDataSize);
				err = AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataByteCount, &dataSize, &audioDataSize);
				if (err == noErr)
				{
					*size = (ALsizei) audioDataSize;
					*data = calloc(1, audioDataSize);
					if (*data)
					{
						dataSize = audioDataSize;
						err = AudioFileReadBytes(audioFile, false, 0, &dataSize, *data);
						
						if ((asbd.mFormatID == kAudioFormatLinearPCM) && (asbd.mBitsPerChannel > 8))
						{
							// we just got 16 bit pcm data out of a WAVE file on a big endian platform, so endian swap the data
							AudioConverterRef				converter;
							CAStreamBasicDescription		outFormat = asbd;
							void *							tempData = NULL;
							
							// set format to big endian
							outFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
							// make some room for the converted data
							tempData = calloc(1, audioDataSize);
							
							err = AudioConverterNew(&asbd, &outFormat, &converter);
							if ((err == noErr) && (tempData != NULL))
							{
								UInt32		bufferSize = audioDataSize;
								err = AudioConverterConvertBuffer(converter, audioDataSize, *data, &bufferSize, tempData);
								if (err == noErr)
									memcpy(*data, tempData, audioDataSize);
								AudioConverterDispose(converter);
							}
							if (tempData) free (tempData);
						}
					}
				}
			}
			err = AudioFileClose(audioFile);
		}
	}
}
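The endian-swap block in the middle of Example #6 only does real work on big-endian (PowerPC-era) machines, as its own comment suggests: WAVE files store little-endian PCM, so 16-bit samples must be byte-swapped into the kAudioFormatFlagIsBigEndian layout before being handed to OpenAL. The AudioConverterNew / AudioConverterConvertBuffer / AudioConverterDispose triple appears here in close to its shortest possible form.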
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example #7
// This currently will only work for CBR formats
OSStatus OALBuffer::ConvertDataForBuffer(void *inData, UInt32 inDataSize, UInt32	inDataFormat, UInt32 inDataSampleRate)
{
#if LOG_VERBOSE
	DebugMessageN5("OALBuffer::ConvertDataForBuffer() - OALBuffer:inData:inDataSize:inDataFormat:inDataSampleRate = %ld:%p:%ld:%ld:%ld", (long int) mSelfToken, inData, (long int) inDataSize, (long int) inDataFormat, (long int) inDataSampleRate);
#endif    
	OSStatus					result = noErr;

    try {

        AudioConverterRef			converter;
        CAStreamBasicDescription	destFormat;
        UInt32						framesOfSource = 0;

        if (inData == NULL)
            throw ((OSStatus) AL_INVALID_OPERATION);

        result = FillInASBD(mPreConvertedDataFormat, inDataFormat, inDataSampleRate);
            THROW_RESULT

        if (mPreConvertedDataFormat.NumberChannels() == 1)
            mPreConvertedDataFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved; 
                    
        destFormat.mChannelsPerFrame = mPreConvertedDataFormat.NumberChannels();
        destFormat.mSampleRate = mPreConvertedDataFormat.mSampleRate;
        destFormat.mFormatID = kAudioFormatLinearPCM;
        
        if (mPreConvertedDataFormat.NumberChannels() == 1)
            destFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
        else
            destFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked; // leave stereo data interleaved, and an AC will be used for deinterleaving later on
        
        destFormat.mFramesPerPacket = 1;	
        destFormat.mBitsPerChannel = sizeof (Float32) * 8;	
        destFormat.mBytesPerPacket = sizeof (Float32) * destFormat.NumberChannels();	
        destFormat.mBytesPerFrame = sizeof (Float32) * destFormat.NumberChannels();
        
        result = FillInASBD(mDataFormat, inDataFormat, UInt32(destFormat.mSampleRate));
            THROW_RESULT
            
        result = AudioConverterNew(&mPreConvertedDataFormat, &destFormat, &converter);
            THROW_RESULT

		framesOfSource = inDataSize / mPreConvertedDataFormat.mBytesPerFrame; // THIS ONLY WORKS FOR CBR FORMATS
			
       	UInt32 dataSize = framesOfSource * sizeof(Float32) * destFormat.NumberChannels();
        
        if (mData != NULL)
        {
            if (mDataSize != dataSize)
            {
                mDataSize = dataSize;
                void *newDataPtr = realloc(mData, mDataSize);
                if (newDataPtr == NULL)
                    throw ((OSStatus) AL_INVALID_OPERATION);
                    
                mData = (UInt8 *) newDataPtr;
            }		
        }
        else
        {
            mDataSize = dataSize;
            mData = (UInt8 *) malloc (mDataSize);
            if (mData == NULL)
                throw ((OSStatus) AL_INVALID_OPERATION);
        }

        if (mData != NULL)
        {
			result = AudioConverterConvertBuffer(converter, inDataSize, inData, &mDataSize, mData);
			if (result == noErr)
            {
                mDataFormat.SetFrom(destFormat);
				if (mPreConvertedDataFormat.NumberChannels() == 1)
					mDataHasBeenConverted = true;
				else
					mDataHasBeenConverted = false;
			}
        }
        
        AudioConverterDispose(converter);
    }
    catch (OSStatus     result) {
        return (result);
    }
    catch (...) {
        result = (OSStatus) AL_INVALID_OPERATION;
    }
    
	return (result);
}
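A detail Example #7 relies on: the fourth argument of AudioConverterConvertBuffer is in/out — on entry it gives the capacity of the output buffer, on exit the byte count actually produced. That is why mDataSize is passed by address here, and why the disabled branch in Example #2 warns when bsize comes back different from writeBufferByteSize.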
Example #8
static void readCallback(void *aqData,
						 AudioQueueRef inAQ,
						 AudioQueueBufferRef inBuffer,
						 const AudioTimeStamp * inStartTime,
						 UInt32 inNumPackets,
						 const AudioStreamPacketDescription * inPacketDesc)
{
	AQData *d = (AQData *) aqData;
	OSStatus err;
	mblk_t *rm = NULL;

	UInt32 len =
		(inBuffer->mAudioDataByteSize * d->readAudioFormat.mSampleRate /
		 1) / d->devicereadFormat.mSampleRate /
		d->devicereadFormat.mChannelsPerFrame;

	ms_mutex_lock(&d->mutex);
	if (d->read_started == FALSE) {
		ms_mutex_unlock(&d->mutex);
		return;
	}

	rm = allocb(len, 0);

#if 0
	err = AudioConverterConvertBuffer(d->readAudioConverter,
									  inBuffer->mAudioDataByteSize,
									  inBuffer->mAudioData,
									  &len, rm->b_wptr);
	if (err != noErr) {
		ms_error("readCallback: AudioConverterConvertBuffer %d", err);
		ms_warning("readCallback: inBuffer->mAudioDataByteSize = %d",
				   inBuffer->mAudioDataByteSize);
		ms_warning("readCallback: outlen = %d", len);
		ms_warning("readCallback: origlen = %i",
				   (inBuffer->mAudioDataByteSize *
					d->readAudioFormat.mSampleRate / 1) /
				   d->devicereadFormat.mSampleRate /
				   d->devicereadFormat.mChannelsPerFrame);
		freeb(rm);
	} else {
		rm->b_wptr += len;
		if (gain_volume_in != 1.0f)
		{
			int16_t *ptr=(int16_t *)rm->b_rptr;
			for (;ptr<(int16_t *)rm->b_wptr;ptr++)
			{
				*ptr=(int16_t)(((float)(*ptr))*gain_volume_in);
			}
		}
		putq(&d->rq, rm);
	}
#else
	memcpy(rm->b_wptr, inBuffer->mAudioData, len);
	rm->b_wptr += len;
	if (gain_volume_in != 1.0f)
	{
		int16_t *ptr=(int16_t *)rm->b_rptr;
		for (;ptr<(int16_t *)rm->b_wptr;ptr++)
		{
			*ptr=(int16_t)(((float)(*ptr))*gain_volume_in);
		}
	}
	putq(&d->rq, rm);	
#endif
	
	err = AudioQueueEnqueueBuffer(d->readQueue, inBuffer, 0, NULL);
	if (err != noErr) {
		ms_error("readCallback:AudioQueueEnqueueBuffer %d", err);
	}
	ms_mutex_unlock(&d->mutex);
}
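One last observation on the gain loop in Example #8: scaling int16_t samples by an arbitrary float can overflow and wrap when gain_volume_in exceeds 1.0. A saturating variant (a sketch, not part of the original code) would clamp instead:

static inline int16_t apply_gain_saturating(int16_t sample, float gain)
{
	float v = (float)sample * gain;
	if (v > 32767.0f)  return 32767;   // clamp at the int16 ceiling
	if (v < -32768.0f) return -32768;  // clamp at the int16 floor
	return (int16_t)v;
}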