Example #1
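pjmedia's iLBC codec destructor: when built with Core Audio support, the encoder and decoder converters are disposed before the codec's memory pool is released.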
/*
 * Free codec.
 */
static pj_status_t ilbc_dealloc_codec( pjmedia_codec_factory *factory, 
				      pjmedia_codec *codec )
{
    struct ilbc_codec *ilbc_codec;

    PJ_ASSERT_RETURN(factory && codec, PJ_EINVAL);
    PJ_UNUSED_ARG(factory);
    PJ_ASSERT_RETURN(factory == &ilbc_factory.base, PJ_EINVAL);

    ilbc_codec = (struct ilbc_codec*) codec;

#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO) && PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    if (ilbc_codec->enc) {
	AudioConverterDispose(ilbc_codec->enc);
	ilbc_codec->enc = NULL;
    }
    if (ilbc_codec->dec) {
	AudioConverterDispose(ilbc_codec->dec);
	ilbc_codec->dec = NULL;
    }
#endif

    pj_pool_release(ilbc_codec->pool);

    return PJ_SUCCESS;
}
Example #2
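GameSpy Voice capture initialization: each failure after AudioConverterNew unwinds by disposing the freshly created capture converter.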
static GVBool gviHardwareInitCapture(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	GVICapturedFrame * frame;
	int numCaptureBufferBytes;
	int numCaptureBufferFrames;
	int i;

	// get the capture format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, true, kAudioDevicePropertyStreamFormat, &size, &data->m_captureStreamDescriptor);
	if(result != noErr)
		return GVFalse;

	// create a converter from the capture format to the GV format
	result = AudioConverterNew(&data->m_captureStreamDescriptor, &GVIVoiceFormat, &data->m_captureConverter);
	if(result != noErr)
		return GVFalse;

	// allocate a capture buffer
	data->m_captureBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_captureBuffer)
	{
		AudioConverterDispose(data->m_captureConverter);
		return GVFalse;		
	}

	// allocate space for holding captured frames
	numCaptureBufferBytes = gviMultiplyByBytesPerMillisecond(GVI_CAPTURE_BUFFER_MILLISECONDS);
	numCaptureBufferBytes = gviRoundUpToNearestMultiple(numCaptureBufferBytes, GVIBytesPerFrame);
	numCaptureBufferFrames = (numCaptureBufferBytes / GVIBytesPerFrame);
	for(i = 0 ; i < numCaptureBufferFrames ; i++)
	{
		frame = (GVICapturedFrame *)gsimalloc(sizeof(GVICapturedFrame) + GVIBytesPerFrame - sizeof(GVSample));
		if(!frame)
		{
			gviFreeCapturedFrames(&data->m_captureAvailableFrames);
			gsifree(data->m_captureBuffer);
			AudioConverterDispose(data->m_captureConverter);
			return GVFalse;
		}
		gviPushFirstFrame(&data->m_captureAvailableFrames, frame);
	}

	// init the last crossed time
	data->m_captureLastCrossedThresholdTime = (data->m_captureClock - GVI_HOLD_THRESHOLD_FRAMES - 1);

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, true, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_captureVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Example #3
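An AAC encoder's setBitrate: the existing converter is disposed and recreated via AudioConverterNewSpecific so the new bitrate can take effect.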
    void
    AACEncode::setBitrate(int bitrate)
    {
        if(m_bitrate != bitrate) {
            m_converterMutex.lock();
            UInt32 br = bitrate;
            AudioConverterDispose(m_audioConverter);

            const OSType subtype = kAudioFormatMPEG4AAC;
            AudioClassDescription requestedCodecs[2] = {
                {
                    kAudioEncoderComponentType,
                    subtype,
                    kAppleSoftwareAudioCodecManufacturer
                },
                {
                    kAudioEncoderComponentType,
                    subtype,
                    kAppleHardwareAudioCodecManufacturer
                }
            };
            AudioConverterNewSpecific(&m_in, &m_out, 2, requestedCodecs, &m_audioConverter);
            OSStatus result = AudioConverterSetProperty(m_audioConverter, kAudioConverterEncodeBitRate, sizeof(br), &br);
            UInt32 propSize = sizeof(br);
            
            if(result == noErr) {
                AudioConverterGetProperty(m_audioConverter, kAudioConverterEncodeBitRate, &propSize, &br);
                m_bitrate = br;
            }
            m_converterMutex.unlock();
        }
    }
Example #4
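mpg123's Core Audio output close: the converter is disposed unconditionally, alongside stopping and uninitializing the output unit.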
static int close_coreaudio(audio_output_t *ao)
{
	mpg123_coreaudio_t* ca = (mpg123_coreaudio_t*)ao->userptr;

	if (ca) {
		ca->decode_done = 1;
		while(!ca->play_done && ca->play) usleep(10000);
		
		/* No matter the error code, we want to close it (by brute force if necessary) */
		AudioConverterDispose(ca->converter);
		AudioOutputUnitStop(ca->outputUnit);
		AudioUnitUninitialize(ca->outputUnit);
		CloseComponent(ca->outputUnit);
	
	    /* Free the ring buffer */
		sfifo_close( &ca->fifo );
		
		/* Free the conversion buffer */
		if (ca->buffer) {
			free( ca->buffer );
			ca->buffer = NULL;
		}
		
	}
	
	return 0;
}
Example #5
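A softphone's Core Audio close path: the device struct is null-checked up front, and the input converter is disposed and nulled before the driver data is freed.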
void ca_close(phastream_t *as) {
	ca_dev *cadev = (ca_dev *) as->drvinfo;

	DBG_DYNA_AUDIO_DRV("** Closing audio stream\n");

	if (!cadev)
		return;

	verify_noerr(AudioDeviceStop(get_audiodeviceid(cadev->inputID), input_proc));

	clean_input_device(as);

	verify_noerr(AudioOutputUnitStop(cadev->outputAU));
	verify_noerr(AudioUnitUninitialize(cadev->outputAU));

	printf("phad_coreaudio:ca_close:cleaning cadev\n"); // power pc hack 2/2

	if (cadev->convertedInputBuffer) {
		free(cadev->convertedInputBuffer);
		cadev->convertedInputBuffer = NULL;
	}
	if (cadev->inputConverter) {
		AudioConverterDispose(cadev->inputConverter);
		cadev->inputConverter = NULL;
	}
	free(cadev);
	as->drvinfo = 0;
}
Example #6
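A streaming player's Audio_Stream destructor: the converter handle is null-checked, disposed, and zeroed together with the stream's other resources.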
Audio_Stream::~Audio_Stream()
{
    if (m_defaultContentType) {
        CFRelease(m_defaultContentType), m_defaultContentType = NULL;
    }
    
    if (m_contentType) {
        CFRelease(m_contentType), m_contentType = NULL;
    }
    
    close();
    
    delete [] m_outputBuffer, m_outputBuffer = 0;
    
    m_httpStream->m_delegate = 0;
    delete m_httpStream, m_httpStream = 0;
    
    if (m_audioConverter) {
        AudioConverterDispose(m_audioConverter), m_audioConverter = 0;
    }
    
    if (m_fileOutput) {
        delete m_fileOutput, m_fileOutput = 0;
    }
}
Example #7
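GameSpy Voice capture cleanup: the capture converter, the captured-frame lists, and the capture buffer are torn down.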
static void gviCleanupCapture(GVIHardwareData * data)
{
	if(data->m_captureConverter)
		AudioConverterDispose(data->m_captureConverter);
	gviFreeCapturedFrames(&data->m_captureAvailableFrames);
	gviFreeCapturedFrames(&data->m_capturedFrames);
	gsifree(data->m_captureBuffer);
}
Example #8
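GameSpy Voice playback cleanup: the source list, the playback converter, and the playback buffer are released.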
static void gviCleanupPlayback(GVIHardwareData * data)
{
	if(data->m_playbackSources)
		gviFreeSourceList(data->m_playbackSources);
	if(data->m_playbackConverter)
		AudioConverterDispose(data->m_playbackConverter);
	gsifree(data->m_playbackBuffer);
}
Example #9
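RageSound_CA's destructor: the IO proc is stopped first, then the converter is disposed if one was created.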
RageSound_CA::~RageSound_CA()
{
	if( mOutputDevice != NULL )
		mOutputDevice->StopIOProc( GetData );
	delete mOutputDevice;

	if( mConverter != NULL )
		AudioConverterDispose( mConverter );
}
Example #10
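CAAudioFile::CloseConverter: the canonical guarded pattern; dispose only when the handle is non-null, then reset it to NULL.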
// _______________________________________________________________________________________
//
void	CAAudioFile::CloseConverter()
{
	if (mConverter) {
#if VERBOSE_CONVERTER
		printf("CAAudioFile %p : CloseConverter\n", this);
#endif
		AudioConverterDispose(mConverter);
		mConverter = NULL;
	}
}
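The dispose-only-if-non-null, then-zero-the-handle pattern above recurs in most of these examples. As a minimal RAII sketch (hypothetical, not taken from any of the projects shown), the same guarantee can be made automatic in C++:

#include <AudioToolbox/AudioToolbox.h>

// Hypothetical helper, not part of AudioToolbox: owns an AudioConverterRef
// and disposes it exactly once, mirroring CAAudioFile::CloseConverter above.
struct ScopedAudioConverter {
    AudioConverterRef ref = nullptr;

    ScopedAudioConverter() = default;
    ScopedAudioConverter(const ScopedAudioConverter &) = delete;
    ScopedAudioConverter &operator=(const ScopedAudioConverter &) = delete;
    ~ScopedAudioConverter() { reset(); }

    // Dispose the current converter (if any) and take ownership of newRef.
    void reset(AudioConverterRef newRef = nullptr) {
        if (ref)
            AudioConverterDispose(ref);
        ref = newRef;
    }
};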
Example #11
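HandBrake's Core Audio encoder close: the converter is disposed, then the work object's buffers and private data are freed.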
/***********************************************************************
 * Close
 ***********************************************************************
 *
 **********************************************************************/
void encCoreAudioClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if( pv->converter )
    {
        AudioConverterDispose( pv->converter );
        hb_list_empty( &pv->list );
        free( pv->obuf );
        free( pv->buf );
        free( pv );
        w->private_data = NULL;
    }
}
Example #12
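OpenAL Soft capture close: the ring buffer, resample buffer, and buffer list are freed before the converter is disposed and the audio unit closed.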
static void ca_close_capture(ALCdevice *device)
{
    ca_data *data = (ca_data*)device->ExtraData;

    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    AudioConverterDispose(data->audioConverter);
    CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;
}
Example #13
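mediastreamer2's AudioQueue writer stop: the dispose call is left disabled under #if 0; the queue itself is stopped and disposed instead.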
static void aq_stop_w(MSFilter * f)
{
	AQData *d = (AQData *) f->data;
	if (d->write_started == TRUE) {
		ms_mutex_lock(&d->mutex);
		d->write_started = FALSE;	/* avoid a deadlock related to buffer conversion in callback */
		ms_mutex_unlock(&d->mutex);
#if 0
		AudioConverterDispose(d->writeAudioConverter);
#endif
		AudioQueueStop(d->writeQueue, true);

		AudioQueueDispose(d->writeQueue, true);
	}
}
Example #14
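SFB AudioEngine's Converter::Close: the converter is disposed and nulled after the decoder state is reset, with a warning if Close() is called on an unopened converter.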
bool SFB::Audio::Converter::Close(CFErrorRef *error)
{
	if(!IsOpen()) {
		LOGGER_WARNING("org.sbooth.AudioEngine.AudioConverter", "Close() called on an AudioConverter that hasn't been opened");
		return true;
	}

	mConverterState.reset();
	mDecoder.reset();

	if(mConverter)
		AudioConverterDispose(mConverter), mConverter = nullptr;

	mIsOpen = false;
	return true;
}
Example #15
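mediastreamer2's Core Audio writer stop: the output converter and audio unit are disposed/uninitialized and their handles nulled.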
static void ca_stop_w(CAData *d){
	if(d->write_started == TRUE) {
		if(AudioOutputUnitStop(d->caOutAudioUnit) == noErr)
			d->write_started=FALSE;
	}
	if (d->caOutConverter!=NULL)
	{
		AudioConverterDispose(d->caOutConverter);
		d->caOutConverter=NULL;
	}
	if (d->caOutAudioUnit!=NULL)
	{
		AudioUnitUninitialize(d->caOutAudioUnit);
		d->caOutAudioUnit=NULL;
	}	
}
Example #16
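A variant of the Audio_Stream destructor above, without the content-type cleanup; the converter is again disposed only if present.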
Audio_Stream::~Audio_Stream()
{
    close();
    
    delete [] m_outputBuffer, m_outputBuffer = 0;
    
    m_httpStream->m_delegate = 0;
    delete m_httpStream, m_httpStream = 0;
    
    if (m_audioConverter) {
        AudioConverterDispose(m_audioConverter), m_audioConverter = 0;
    }
    
    if (m_fileOutput) {
        delete m_fileOutput, m_fileOutput = 0;
    }
}
Example #17
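The AudioQueue writer stop again, here with an additional AudioSessionSetActive(false) on iOS.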
static void aq_stop_w(MSFilter * f)
{
    AQData *d = (AQData *) f->data;
    if (d->write_started == TRUE) {
        ms_mutex_lock(&d->mutex);
        d->write_started = FALSE;	/* avoid a deadlock related to buffer conversion in callback */
        ms_mutex_unlock(&d->mutex);
#if 0
        AudioConverterDispose(d->writeAudioConverter);
#endif
        AudioQueueStop(d->writeQueue, true);

        AudioQueueDispose(d->writeQueue, true);

#if TARGET_OS_IPHONE
        check_aqresult(AudioSessionSetActive(false),"AudioSessionSetActive(false)");
#endif
    }
}
Example #18
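Mozilla's AppleATDecoder::Shutdown: dispose failures are logged, and each handle is nulled only when its teardown call succeeds.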
nsresult
AppleATDecoder::Shutdown()
{
    LOG("Shutdown: Apple AudioToolbox AAC decoder");
    OSStatus rv1 = AudioConverterDispose(mConverter);
    if (rv1) {
        LOG("error %d disposing of AudioConverter", rv1);
    } else {
        mConverter = nullptr;
    }

    OSStatus rv2 = AudioFileStreamClose(mStream);
    if (rv2) {
        LOG("error %d closing AudioFileStream", rv2);
    } else {
        mStream = nullptr;
    }

    return (rv1 || rv2) ? NS_ERROR_FAILURE : NS_OK;
}
Example #19
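A later AppleATDecoder::Shutdown: queued samples are cleared first, and a dispose failure aborts the shutdown with NS_ERROR_FAILURE.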
nsresult
AppleATDecoder::Shutdown()
{
  LOG("Shutdown: Apple AudioToolbox AAC decoder");
  mQueuedSamples.Clear();
  OSStatus rv = AudioConverterDispose(mConverter);
  if (rv) {
    LOG("error %d disposing of AudioConverter", rv);
    return NS_ERROR_FAILURE;
  }
  mConverter = nullptr;

  if (mStream) {
    rv = AudioFileStreamClose(mStream);
    if (rv) {
      LOG("error %d disposing of AudioFileStream", rv);
      return NS_ERROR_FAILURE;
    }
    mStream = nullptr;
  }
  return NS_OK;
}
Example #20
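AppleATDecoder::ProcessShutdown: asserts it runs on the task queue, closes the file stream, then disposes the converter if present.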
void
AppleATDecoder::ProcessShutdown()
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  if (mStream) {
    OSStatus rv = AudioFileStreamClose(mStream);
    if (rv) {
      LOG("error %d disposing of AudioFileStream", rv);
      return;
    }
    mStream = nullptr;
  }

  if (mConverter) {
    LOG("Shutdown: Apple AudioToolbox AAC decoder");
    OSStatus rv = AudioConverterDispose(mConverter);
    if (rv) {
      LOG("error %d disposing of AudioConverter", rv);
    }
    mConverter = nullptr;
  }
}
Example #21
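HandBrake's encoder close, revised: every resource is null-checked before being freed, and the converter is disposed only if it exists.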
/***********************************************************************
 * Close
 ***********************************************************************
 *
 **********************************************************************/
void encCoreAudioClose(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;

    if (pv != NULL)
    {
        if (pv->converter)
        {
            AudioConverterDispose(pv->converter);
        }
        if (pv->buf != NULL)
        {
            free(pv->buf);
        }
        if (pv->remap != NULL)
        {
            hb_audio_remap_free(pv->remap);
        }
        hb_list_empty(&pv->list);
        free(pv);
        w->private_data = NULL;
    }
}
Example #22
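mediastreamer2's Core Audio reader stop: the input converter and audio unit are torn down, followed by the audio buffer lists.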
static void ca_stop_r(CAData *d){
	if(d->read_started == TRUE) {
		if(AudioOutputUnitStop(d->caInAudioUnit) == noErr)
			d->read_started=FALSE;
	}
	if (d->caInConverter!=NULL)
	{
		AudioConverterDispose(d->caInConverter);
		d->caInConverter=NULL;
	}
	if (d->caInAudioUnit!=NULL)
	{
		AudioUnitUninitialize(d->caInAudioUnit);
		d->caInAudioUnit=NULL;
	}
	if (d->fAudioBuffer)
		DestroyAudioBufferList(d->fAudioBuffer);
	d->fAudioBuffer=NULL;
	if (d->fMSBuffer)
		DestroyAudioBufferList(d->fMSBuffer);
	d->fMSBuffer=NULL;
}
Example #23
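A sampler's file loader: a temporary converter performs the byte-order conversion to float mono and is disposed on both the error and success paths.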
//-----------------------------------------------------------------------------
bool AudioFile::Load()
{
#if MAC
	AudioFileID mAudioFileID;
    AudioStreamBasicDescription fileDescription, outputFormat;
    SInt64 dataSize64;
    UInt32 dataSize;
	
	OSStatus err;
	UInt32 size;
	
    // Open the file
	FSRef	ref;
	Boolean	isDirectory=false;
	FSPathMakeRef((const UInt8*)GetFilePath(), &ref, &isDirectory);
	
	err = AudioFileOpen(&ref, fsRdPerm, 0, &mAudioFileID);
    if (err) {
        //NSLog(@"AudioFileOpen failed");
        return false;
    }
	
    // Read the opened file's basic format info into fileDescription
    size = sizeof(AudioStreamBasicDescription);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, 
							   &size, &fileDescription);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
    // Read the byte count of the file's audio data into dataSize
    size = sizeof(SInt64);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, 
							   &size, &dataSize64);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	dataSize = static_cast<UInt32>(dataSize64);
	
	AudioFileTypeID	fileTypeID;
	size = sizeof( AudioFileTypeID );
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFileFormat, &size, &fileTypeID);
	if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;
	
	// Get the loop points
	Float64		st_point=0.0,end_point=0.0;
	if ( fileTypeID == kAudioFileAIFFType || fileTypeID == kAudioFileAIFCType ) {
		// Get the INST chunk
		AudioFileGetUserDataSize(mAudioFileID, 'INST', 0, &size);
		if ( size > 4 ) {
			UInt8	*instChunk = new UInt8[size];
			AudioFileGetUserData(mAudioFileID, 'INST', 0, &size, instChunk);
			
			// Get the MIDI info
			mInstData.basekey = instChunk[0];
			mInstData.lowkey = instChunk[2];
			mInstData.highkey = instChunk[3];
			
			if ( instChunk[9] > 0 ) {	// check the loop flag
				// Get the markers
				UInt32	writable;
				err = AudioFileGetPropertyInfo(mAudioFileID, kAudioFilePropertyMarkerList,
											   &size, &writable);
				if (err) {
					//NSLog(@"AudioFileGetPropertyInfo failed");
					AudioFileClose(mAudioFileID);
					return false;
				}
				UInt8	*markersBuffer = new UInt8[size];
				AudioFileMarkerList	*markers = reinterpret_cast<AudioFileMarkerList*>(markersBuffer);
				
				err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyMarkerList, 
										   &size, markers);
				if (err) {
					//NSLog(@"AudioFileGetProperty failed");
					AudioFileClose(mAudioFileID);
					return false;
				}
				
				// Set the loop points
				for (unsigned int i=0; i<markers->mNumberMarkers; i++) {
					if (markers->mMarkers[i].mMarkerID == instChunk[11] ) {
						st_point = markers->mMarkers[i].mFramePosition;
					}
					else if (markers->mMarkers[i].mMarkerID == instChunk[13] ) {
						end_point = markers->mMarkers[i].mFramePosition;
					}
					CFRelease(markers->mMarkers[i].mName);
				}
				if ( st_point < end_point ) {
					mInstData.loop = 1;
				}
				delete [] markersBuffer;
			}
			delete [] instChunk;
		}
		
	}
	else if ( fileTypeID == kAudioFileWAVEType ) {
		// Get the smpl chunk
		AudioFileGetUserDataSize( mAudioFileID, 'smpl', 0, &size );
		if ( size >= sizeof(WAV_smpl) ) {
			UInt8	*smplChunk = new UInt8[size];
			AudioFileGetUserData( mAudioFileID, 'smpl', 0, &size, smplChunk );
			WAV_smpl	*smpl = (WAV_smpl *)smplChunk;
			
			smpl->loops = EndianU32_LtoN( smpl->loops );
			
			if ( smpl->loops > 0 ) {
				mInstData.loop = true;
				mInstData.basekey = EndianU32_LtoN( smpl->note );
				st_point = EndianU32_LtoN( smpl->start );
				end_point = EndianU32_LtoN( smpl->end ) + 1;	// SoundForge et al. treat the end point as inclusive
				//end_point = EndianU32_LtoN( smpl->end );	// Peak, for some reason, matches AIFF
			}
			else {
				mInstData.basekey = EndianU32_LtoN( smpl->note );
			}
			delete [] smplChunk;
		}
	}
	
	// Cap the data size
	SInt64	dataSamples = dataSize / fileDescription.mBytesPerFrame;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * fileDescription.mBytesPerFrame;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
    // Allocate temporary memory for reading the waveform
    char *fileBuffer;
	unsigned int	fileBufferSize;
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*fileDescription.mBytesPerFrame;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	err = AudioFileReadBytes(mAudioFileID, false, 0, &dataSize, fileBuffer);
    if (err) {
        //NSLog(@"AudioFileReadBytes failed");
        AudioFileClose(mAudioFileID);
        delete [] fileBuffer;
        return false;
    }
    AudioFileClose(mAudioFileID);
	
    // Unroll the loop
    Float64	adjustment = 1.0;
    outputFormat=fileDescription;
	if (mInstData.loop) {
		UInt32	plusalpha=0, framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = 
			(end_point-st_point)>(EXPAND_BUFFER-plusalpha)?(EXPAND_BUFFER-plusalpha):end_point-st_point;
			memcpy(fileBuffer+((int)end_point+plusalpha)*fileDescription.mBytesPerFrame,
				   fileBuffer+(int)st_point*fileDescription.mBytesPerFrame,
				   framestocopy*fileDescription.mBytesPerFrame);
			plusalpha += framestocopy;
		}
		dataSize += plusalpha*fileDescription.mBytesPerFrame;
		
		// Fix the loop to a 16-sample boundary
		adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		st_point *= adjustment;
		end_point *= adjustment;
	}
	outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
	outputFormat.mChannelsPerFrame = 1;
	outputFormat.mBytesPerFrame = sizeof(float);
	outputFormat.mBitsPerChannel = 32;
	outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame;
	
    // Set up a converter for the byte-order conversion
    AudioConverterRef converter;
	err = AudioConverterNew(&fileDescription, &outputFormat, &converter);
    if (err) {
        //NSLog(@"AudioConverterNew failed");
        delete [] fileBuffer;
        return false;
    }
	
	// Set the sample rate converter quality to maximum
//	if (fileDescription.mSampleRate != outputFormat.mSampleRate) {
//		size = sizeof(UInt32);
//		UInt32	setProp = kAudioConverterQuality_Max;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterQuality,
//								  size, &setProp);
//        
//        size = sizeof(UInt32);
//		setProp = kAudioConverterSampleRateConverterComplexity_Mastering;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterComplexity,
//								  size, &setProp);
//        
//	}
	
    // Get a buffer size sufficient for the output
	UInt32	outputSize = dataSize;
	size = sizeof(UInt32);
	err = AudioConverterGetProperty(converter, kAudioConverterPropertyCalculateOutputBufferSize, 
									&size, &outputSize);
	if (err) {
		//NSLog(@"AudioConverterGetProperty failed");
		delete [] fileBuffer;
		AudioConverterDispose(converter);
        return false;
	}
    UInt32 monoSamples = outputSize/sizeof(float);
    
    // Convert the byte order
    float *monoData = new float[monoSamples];
	AudioConverterConvertBuffer(converter, dataSize, fileBuffer,
								&outputSize, monoData);
    if(outputSize == 0) {
        //NSLog(@"AudioConverterConvertBuffer failed");
        delete [] fileBuffer;
        AudioConverterDispose(converter);
        return false;
    }
    
    // If the loop length is not a multiple of 16, convert the sample rate
    Float64 inputSampleRate = fileDescription.mSampleRate;
    Float64 outputSampleRate = fileDescription.mSampleRate * adjustment;
    int	outSamples = monoSamples;
    if ( outputSampleRate == inputSampleRate ) {
        m_pAudioData = new short[monoSamples];
        for (int i=0; i<monoSamples; i++) {
            m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
        }
    }
    else {
        outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
        m_pAudioData = new short[outSamples];
        resampling(monoData, monoSamples, inputSampleRate,
                   m_pAudioData, &outSamples, outputSampleRate);
    }
    
    // Clean up
    delete [] monoData;
    delete [] fileBuffer;
    AudioConverterDispose(converter);
	
	// Set the Inst data
	if ( st_point > MAXIMUM_SAMPLES ) {
		mInstData.lp = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp			= st_point;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		mInstData.lp_end = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp_end		= end_point;
	}
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;
	
	mIsLoaded = true;
	
	return true;
#else
	// Windows audio file loading

	// Open the file
	HMMIO	hmio = NULL;
	MMRESULT	err;
	DWORD		size;

	hmio = mmioOpen( mPath, NULL, MMIO_READ );
	if ( !hmio ) {
		return false;
	}
	
	// Find the RIFF chunk
	MMCKINFO	riffChunkInfo;
	riffChunkInfo.fccType = mmioFOURCC('W', 'A', 'V', 'E');
	err = mmioDescend( hmio, &riffChunkInfo, NULL, MMIO_FINDRIFF );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( (riffChunkInfo.ckid != FOURCC_RIFF) || (riffChunkInfo.fccType != mmioFOURCC('W', 'A', 'V', 'E') ) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Find the format chunk
	MMCKINFO	formatChunkInfo;
	formatChunkInfo.ckid = mmioFOURCC('f', 'm', 't', ' ');
	err = mmioDescend( hmio, &formatChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( formatChunkInfo.cksize < sizeof(PCMWAVEFORMAT) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Get the format info
	WAVEFORMATEX	pcmWaveFormat;
	DWORD			fmsize = (formatChunkInfo.cksize > sizeof(WAVEFORMATEX)) ? sizeof(WAVEFORMATEX):formatChunkInfo.cksize;
	size = mmioRead( hmio, (HPSTR)&pcmWaveFormat, fmsize );
	if ( size != fmsize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( pcmWaveFormat.wFormatTag != WAVE_FORMAT_PCM ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;

	// Find the smpl chunk
	MMCKINFO	smplChunkInfo;
	smplChunkInfo.ckid = mmioFOURCC('s', 'm', 'p', 'l');
	err = mmioDescend( hmio, &smplChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		smplChunkInfo.cksize = 0;
	}
	double	st_point=0.0;
	double	end_point=0.0;
	if ( smplChunkInfo.cksize >= sizeof(WAV_smpl) ) {
		// Get the loop points
		unsigned char	*smplChunk = new unsigned char[smplChunkInfo.cksize];
		size = mmioRead(hmio,(HPSTR)smplChunk, smplChunkInfo.cksize);
		WAV_smpl	*smpl = (WAV_smpl *)smplChunk;

		if ( smpl->loops > 0 ) {
			mInstData.loop = 1;
			mInstData.basekey = smpl->note;
			st_point = smpl->start;
			end_point = smpl->end + 1;	// SoundForge et al. treat the end point as inclusive
		}
		else {
			mInstData.basekey = smpl->note;
		}
		delete [] smplChunk;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Find the data chunk
	MMCKINFO dataChunkInfo;
	dataChunkInfo.ckid = mmioFOURCC('d', 'a', 't', 'a');
	err = mmioDescend( hmio, &dataChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Allocate temporary memory for reading the waveform
	unsigned int	dataSize = dataChunkInfo.cksize;
	int				bytesPerSample = pcmWaveFormat.nBlockAlign;
	char			*fileBuffer;
	unsigned int	fileBufferSize;

	// Cap the data size
	int	dataSamples = dataSize / pcmWaveFormat.nBlockAlign;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * pcmWaveFormat.nBlockAlign;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
	
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*bytesPerSample;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	size = mmioRead(hmio, (HPSTR)fileBuffer, dataSize);
	if ( size != dataSize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioClose(hmio,0);

	// Unroll the loop
	double	inputSampleRate = pcmWaveFormat.nSamplesPerSec;
	double	outputSampleRate = inputSampleRate;
	if (mInstData.loop) {
		unsigned int	plusalpha=0;
		double			framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = 
			(end_point-st_point)>(EXPAND_BUFFER-plusalpha)?(EXPAND_BUFFER-plusalpha):end_point-st_point;
			memcpy(fileBuffer+((int)end_point+plusalpha)*bytesPerSample,
				   fileBuffer+(int)st_point*bytesPerSample,
				   static_cast<size_t>(framestocopy*bytesPerSample));
			plusalpha += static_cast<unsigned int>(framestocopy);
		}
		dataSize += plusalpha*bytesPerSample;
		
		// Fix the loop to a 16-sample boundary
		double	adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		outputSampleRate *= adjustment;
		st_point *= adjustment;
		end_point *= adjustment;
	}

	// First convert to float mono data
	int	bytesPerChannel = bytesPerSample / pcmWaveFormat.nChannels;
	unsigned int	inputPtr = 0;
	unsigned int	outputPtr = 0;
	int				monoSamples = dataSize / bytesPerSample;
	float	range = static_cast<float>((1<<(bytesPerChannel*8-1)) * pcmWaveFormat.nChannels);
	float	*monoData = new float[monoSamples];
	while (inputPtr < dataSize) {
		int	frameSum = 0;
		for (int ch=0; ch<pcmWaveFormat.nChannels; ch++) {
			for (int i=0; i<bytesPerChannel; i++) {
				if (i<bytesPerChannel-1) {
					frameSum += (unsigned char)fileBuffer[inputPtr] << (8*i);
				}
				else {
					frameSum += fileBuffer[inputPtr] << (8*i);
				}
				inputPtr++;
			}
		}
		monoData[outputPtr] = frameSum / range;
		outputPtr++;
	}

	// If the loop length is not a multiple of 16, convert the sample rate
	int	outSamples = monoSamples;
	if ( outputSampleRate == inputSampleRate ) {
		m_pAudioData = new short[monoSamples];
		for (int i=0; i<monoSamples; i++) {
			m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
		}
	}
	else {
		outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
		m_pAudioData = new short[outSamples];
		resampling(monoData, monoSamples, inputSampleRate,
				   m_pAudioData, &outSamples, outputSampleRate);
	}

	// Clean up
	delete [] fileBuffer;
	delete [] monoData;

	// Set the Inst data
	mInstData.lp			= static_cast<int>(st_point);
	mInstData.lp_end		= static_cast<int>(end_point);
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;

	mIsLoaded = true;

	return true;
#endif
}
Example #24
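An AAC encoder destructor: the converter is disposed unconditionally.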
 AACEncode::~AACEncode() {
     
     AudioConverterDispose(m_audioConverter);
 }
Example #25
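OpenAL Soft capture open: the error path disposes the converter only if AudioConverterNew succeeded, then closes the audio unit and frees the device data.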
static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    data = calloc(1, sizeof(*data));
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }

    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

    // Get the hardware format
    propertySize = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        goto error;
    }

    // Set up the requested format description
    switch(device->FmtType)
    {
        case DevFmtUByte:
            requestedFormat.mBitsPerChannel = 8;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtShort:
            requestedFormat.mBitsPerChannel = 16;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtInt:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtFloat:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            requestedFormat.mChannelsPerFrame = 1;
            break;
        case DevFmtStereo:
            requestedFormat.mChannelsPerFrame = 2;
            break;

        case DevFmtQuad:
        case DevFmtX51:
        case DevFmtX51Side:
        case DevFmtX61:
        case DevFmtX71:
            ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
            goto error;
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = device->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // save requested format description for later use
    data->format = requestedFormat;
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    // Use intermediate format for sample rate conversion (outputFormat)
    // Set sample rate to the same as hardware for resampling later
    outputFormat = requestedFormat;
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // Determine sample rate ratio for resampling
    data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;

    // The output format should be the requested format, but using the hardware sample rate
    // This is because the AudioUnit will automatically scale other properties, except for sample rate
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set the AudioUnit output format frame count
    outputFrameCount = device->UpdateSize * data->sampleRateRatio;
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed: %d\n", err);
        goto error;
    }

    // Set up sample converter
    err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
    if(err != noErr)
    {
        ERR("AudioConverterNew failed: %d\n", err);
        goto error;
    }

    // Create a buffer for use in the resample callback
    data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);

    // Allocate buffer for the AudioUnit output
    data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->bufferList == NULL)
        goto error;

    data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    al_string_copy_cstr(&device->DeviceName, deviceName);

    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    if(data->audioConverter)
        AudioConverterDispose(data->audioConverter);
    if(data->audioUnit)
        CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;

    return ALC_INVALID_VALUE;
}
Example #26
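An AudioMixer destructor: every converter held in the m_converters map is disposed.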
 AudioMixer::~AudioMixer()
 {
     for ( auto & it : m_converters ) {
         AudioConverterDispose(it.second.converter);
     }
 }
Example #27
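A stream parser property callback: once packets are ready, any previous converter is disposed before a new one is created for the stream's source format.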
/* This is called by audio file stream parser when it finds property values */
void Audio_Stream::propertyValueCallback(void *inClientData, AudioFileStreamID inAudioFileStream, AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    AS_TRACE("%s\n", __PRETTY_FUNCTION__);
    Audio_Stream *THIS = static_cast<Audio_Stream*>(inClientData);
    
    if (!THIS->m_audioStreamParserRunning) {
        AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
        return;
    }
    
    switch (inPropertyID) {
        case kAudioFileStreamProperty_DataOffset: {
            SInt64 offset;
            UInt32 offsetSize = sizeof(offset);
            OSStatus result = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataOffset, &offsetSize, &offset);
            if (result == 0) {
                THIS->m_dataOffset = offset;
            } else {
                AS_TRACE("%s: reading kAudioFileStreamProperty_DataOffset property failed\n", __PRETTY_FUNCTION__);
            }
            
            break;
        }
        case kAudioFileStreamProperty_AudioDataByteCount: {
            UInt32 byteCountSize = sizeof(UInt64);
            OSStatus err = AudioFileStreamGetProperty(inAudioFileStream,
                                                      kAudioFileStreamProperty_AudioDataByteCount,
                                                      &byteCountSize, &THIS->m_audioDataByteCount);
            if (err) {
                THIS->m_audioDataByteCount = 0;
            }
            break;
        }
        case kAudioFileStreamProperty_ReadyToProducePackets: {
            memset(&(THIS->m_srcFormat), 0, sizeof m_srcFormat);
            UInt32 asbdSize = sizeof(m_srcFormat);
            OSStatus err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &(THIS->m_srcFormat));
            if (err) {
                AS_TRACE("Unable to set the src format\n");
                break;
            }
            
            THIS->m_packetDuration = THIS->m_srcFormat.mFramesPerPacket / THIS->m_srcFormat.mSampleRate;
            
            AS_TRACE("srcFormat, bytes per packet %i\n", (unsigned int)THIS->m_srcFormat.mBytesPerPacket);
            
            if (THIS->m_audioConverter) {
                AudioConverterDispose(THIS->m_audioConverter);
            }
            
            err = AudioConverterNew(&(THIS->m_srcFormat),
                                    &(THIS->m_dstFormat),
                                    &(THIS->m_audioConverter));
            
            if (err) {
                AS_TRACE("Error in creating an audio converter, error %i\n", err);
                
                THIS->m_initializationError = err;
            }
            
            THIS->setCookiesForStream(inAudioFileStream);
            
            THIS->audioQueue()->handlePropertyChange(inAudioFileStream, inPropertyID, ioFlags);
            break;
        }
        default: {
            THIS->audioQueue()->handlePropertyChange(inAudioFileStream, inPropertyID, ioFlags);
            break;
        }
    }
}
Example #28
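Apple-style file conversion sample: the converter drives AudioConverterFillComplexBuffer in a loop, and cleanup disposes it along with the source and destination files.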
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jive with
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
Example #29
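Unreal's FCoreAudioSoundSource detach: after the AUGraph nodes are disconnected and removed, the source's converter is disposed and its handle nulled.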
bool FCoreAudioSoundSource::DetachFromAUGraph()
{
	AURenderCallbackStruct Input;
	Input.inputProc = NULL;
	Input.inputProcRefCon = NULL;
	SAFE_CA_CALL( AudioUnitSetProperty( SourceUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &Input, sizeof( Input ) ) );

	if( StreamSplitterNode )
	{
		SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamSplitterNode, 0 ) );
	}
	if( ReverbNode )
	{
		SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), ReverbNode, 0 ) );
	}
	if( RadioNode )
	{
		SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), RadioNode, 0 ) );
	}
	if( EQNode )
	{
		SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), EQNode, 0 ) );
	}
	if( StreamMergerNode )
	{
		SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamMergerNode, 0 ) );
	}

	if( AudioChannel )
	{
		if( Buffer->NumChannels < 3 )
		{
			SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), AudioDevice->GetMixer3DNode(), MixerInputNumber ) );
			AudioDevice->SetFreeMixer3DInput( MixerInputNumber );
		}
		else
		{
			SAFE_CA_CALL( AUGraphDisconnectNodeInput( AudioDevice->GetAudioUnitGraph(), AudioDevice->GetMatrixMixerNode(), MixerInputNumber ) );
			AudioDevice->SetFreeMatrixMixerInput( MixerInputNumber );
		}
	}

	if( StreamMergerNode )
	{
		SAFE_CA_CALL( AUGraphRemoveNode( AudioDevice->GetAudioUnitGraph(), StreamMergerNode ) );
	}
	if( EQNode )
	{
		SAFE_CA_CALL( AUGraphRemoveNode( AudioDevice->GetAudioUnitGraph(), EQNode ) );
	}
	if( RadioNode )
	{
		SAFE_CA_CALL( AUGraphRemoveNode( AudioDevice->GetAudioUnitGraph(), RadioNode ) );
	}
	if( ReverbNode )
	{
		SAFE_CA_CALL( AUGraphRemoveNode( AudioDevice->GetAudioUnitGraph(), ReverbNode ) );
	}
	if( StreamSplitterNode )
	{
		SAFE_CA_CALL( AUGraphRemoveNode( AudioDevice->GetAudioUnitGraph(), StreamSplitterNode ) );
	}
	if( AudioChannel )
	{
		SAFE_CA_CALL( AUGraphRemoveNode( AudioDevice->GetAudioUnitGraph(), SourceNode ) );
	}

	SAFE_CA_CALL( AUGraphUpdate( AudioDevice->GetAudioUnitGraph(), NULL ) );

	AudioConverterDispose( CoreAudioConverter );
	CoreAudioConverter = NULL;

	StreamMergerNode = 0;
	StreamMergerUnit = NULL;
	EQNode = 0;
	EQUnit = NULL;
	RadioNode = 0;
	RadioUnit = NULL;
	ReverbNode = 0;
	ReverbUnit = NULL;
	StreamSplitterNode = 0;
	StreamSplitterUnit = NULL;
	SourceNode = 0;
	SourceUnit = NULL;
	MixerInputNumber = -1;
	
	GAudioChannels[AudioChannel] = NULL;
	AudioChannel = 0;
	
	return true;
}
Example #30
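GameSpy Voice playback initialization: failures after AudioConverterNew unwind by disposing the playback converter and freeing the source list.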
static GVBool gviHardwareInitPlayback(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	UInt32 primeMethod;
	SInt32 channelMap[100];
	int i;

	// create the array of sources
	data->m_playbackSources = gviNewSourceList();
	if(!data->m_playbackSources)
		return GVFalse;

	// get the playback format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, false, kAudioDevicePropertyStreamFormat, &size, &data->m_playbackStreamDescriptor);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// create a converter from the GV format to the playback format
	result = AudioConverterNew(&GVIVoiceFormat, &data->m_playbackStreamDescriptor, &data->m_playbackConverter);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// set it to do no priming
	primeMethod = kConverterPrimeMethod_None;
	result = AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterPrimeMethod, sizeof(UInt32), &primeMethod);
	if(result != noErr)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// setup the converter to map the input channel to all output channels
	result = AudioConverterGetPropertyInfo(data->m_playbackConverter, kAudioConverterChannelMap, &size, NULL);
	if(result == noErr)
	{
		result = AudioConverterGetProperty(data->m_playbackConverter, kAudioConverterChannelMap, &size, channelMap);
		if(result == noErr)
		{
			for(i = 0 ; i < (size / sizeof(SInt32)) ; i++)
				channelMap[i] = 0;

			AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterChannelMap, size, channelMap);
		}
	}

	// allocate the playback buffer
	data->m_playbackBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_playbackBuffer)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, false, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_playbackVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}