Example #1
TargetFileCoreAudio::TargetFileCoreAudio( const DataTargetRef &dataTarget, size_t sampleRate, size_t numChannels, SampleType sampleType, const std::string &extension )
    : TargetFile( dataTarget, sampleRate, numChannels, sampleType )
{
    ::CFURLRef targetUrl = ci::cocoa::createCfUrl( Url( dataTarget->getFilePath().string() ) );
    ::AudioFileTypeID fileType = getFileTypeIdFromExtension( extension );

    ::AudioStreamBasicDescription fileAsbd;
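    // Fill in the on-disk stream description (ASBD) that matches the requested sample type.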
    switch( mSampleType ) {
    case SampleType::INT_16:
        fileAsbd = createInt16Asbd( mSampleRate, mNumChannels, true );
        break;
    case SampleType::FLOAT_32:
        fileAsbd = createFloatAsbd( mSampleRate, mNumChannels, true );
        break;
    default:
        CI_ASSERT_NOT_REACHABLE();
    }

    ::ExtAudioFileRef audioFile;
    OSStatus status = ::ExtAudioFileCreateWithURL( targetUrl, fileType, &fileAsbd, nullptr, kAudioFileFlags_EraseFile, &audioFile );
    if( status == kAudioFileUnsupportedDataFormatError )
        throw AudioFileExc( string( "Extended Audio File Services: Unsupported format error, target file: " ) + dataTarget->getFilePath().string(), (int32_t)status );
    else if( status != noErr )
        throw AudioFileExc( string( "Extended Audio File Services failed, target file: " ) + dataTarget->getFilePath().string(), (int32_t)status );

    ::CFRelease( targetUrl );
    mExtAudioFile = ExtAudioFilePtr( audioFile );

    ::AudioStreamBasicDescription clientAsbd = createFloatAsbd( mSampleRate, mNumChannels, false );
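    // clientAsbd is the format of the buffers passed to ExtAudioFileWrite; conversion to the file format happens inside Extended Audio File Services.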

    status = ::ExtAudioFileSetProperty( mExtAudioFile.get(), kExtAudioFileProperty_ClientDataFormat, sizeof( clientAsbd ), &clientAsbd );
    CI_VERIFY( status == noErr );

    mBufferList = createNonInterleavedBufferListShallow( mNumChannels );
}
Example #2
size_t SourceFileOggVorbis::performRead( Buffer *buffer, size_t bufferFrameOffset, size_t numFramesNeeded )
{
	CI_ASSERT( buffer->getNumFrames() >= bufferFrameOffset + numFramesNeeded );

	size_t readCount = 0;
	while( readCount < numFramesNeeded ) {
		float **outChannels;
		int section;

		long outNumFrames = ov_read_float( &mOggVorbisFile, &outChannels, int( numFramesNeeded - readCount ), &section );
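		// ov_read_float reports the number of frames decoded per channel: 0 signals end of stream, a negative value is an error code.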
		if( outNumFrames <= 0 ) {
			if( outNumFrames < 0 )
				throw AudioFileExc( "ov_read_float error", (int32_t)outNumFrames );

			break;
		}

		size_t offset = bufferFrameOffset + readCount;
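		// outChannels holds one non-interleaved float buffer per channel; copy each into the destination at the current write offset.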
		for( size_t ch = 0; ch < mNumChannels; ch++ )
			memcpy( buffer->getChannel( ch ) + offset, outChannels[ch], outNumFrames * sizeof( float ) );

		readCount += outNumFrames;
	}

	return static_cast<size_t>( readCount );
}
Example #3
void SourceFileOggVorbis::init()
{
	CI_ASSERT( mDataSource );
#if ! defined( CINDER_ANDROID )
	if( mDataSource->isFilePath() ) {
		int status = ov_fopen( mDataSource->getFilePath().string().c_str(), &mOggVorbisFile );
		if( status )
			throw AudioFileExc( string( "Failed to open Ogg Vorbis file: " ) + mDataSource->getFilePath().string(), (int32_t)status );
	}
	else {
#endif
		mStream = mDataSource->createStream();

		ov_callbacks callbacks;
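		// Decode through the stream interface by routing libvorbisfile's I/O through our callbacks.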
		callbacks.read_func = readFn;
		callbacks.seek_func = seekFn;
		callbacks.close_func = closeFn;
		callbacks.tell_func = tellFn;

		int status = ov_open_callbacks( this, &mOggVorbisFile, NULL, 0, callbacks );
		CI_VERIFY( status == 0 );

#if ! defined( CINDER_ANDROID )
	}
#endif

	vorbis_info *info = ov_info( &mOggVorbisFile, -1 );
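	// ov_info() describes the current logical bitstream; take the channel count and sample rate from it.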
	mNumChannels = info->channels;
	mSampleRate = info->rate;

	ogg_int64_t totalFrames = ov_pcm_total( &mOggVorbisFile, -1 );
	mNumFrames = mFileNumFrames = static_cast<uint32_t>( totalFrames );
}
Example #4
::AudioFileTypeID TargetFileCoreAudio::getFileTypeIdFromExtension( const std::string &ext )
{
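    // Map the file extension to the corresponding Core Audio AudioFileTypeID.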
    if( ext == "aiff" )
        return kAudioFileAIFFType;
    else if( ext == "wav" )
        return kAudioFileWAVEType;
    else if( ext == "mp3" )
        return kAudioFileMP3Type;
    else if( ext == "m4a" )
        return kAudioFileM4AType;
    else if( ext == "aac" )
        return kAudioFileAAC_ADTSType;

    throw AudioFileExc( string( "TargetFileCoreAudio: unexpected extension '" ) + ext + "'" );
}
Example #5
void SourceFileCoreAudio::initImpl()
{
    ::CFURLRef fileUrl = ci::cocoa::createCfUrl( Url( mDataSource->getFilePath().string() ) );
    ::ExtAudioFileRef audioFile;
    OSStatus status = ::ExtAudioFileOpenURL( fileUrl, &audioFile );
    if( status != noErr ) {
        string urlString = ci::cocoa::convertCfString( CFURLGetString( fileUrl ) );
        throw AudioFileExc( string( "could not open audio source file: " ) + urlString, (int32_t)status );
    }

    ::CFRelease( fileUrl ); // release the CFURLRef now that the file is open, to avoid leaking it
    mExtAudioFile = ExtAudioFilePtr( audioFile );

    ::AudioStreamBasicDescription fileFormat;
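    // Query the file's native stream description to learn its channel count and sample rate.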
    UInt32 propSize = sizeof( fileFormat );
    status = ::ExtAudioFileGetProperty( audioFile, kExtAudioFileProperty_FileDataFormat, &propSize, &fileFormat );
    CI_VERIFY( status == noErr );

    mNumChannels = fileFormat.mChannelsPerFrame;
    mSampleRateNative = fileFormat.mSampleRate;

    size_t outputSampleRate = getSampleRate(); // may be 0 at init
    if( ! outputSampleRate )
        outputSampleRate = mSampleRateNative;

    SInt64 numFrames;
    propSize = sizeof( numFrames );
    status = ::ExtAudioFileGetProperty( audioFile, kExtAudioFileProperty_FileLengthFrames, &propSize, &numFrames );
    CI_VERIFY( status == noErr );
    mNumFrames = mFileNumFrames = static_cast<size_t>( numFrames );

    ::AudioStreamBasicDescription outputFormat = audio::cocoa::createFloatAsbd( outputSampleRate, mNumChannels );
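    // The client data format describes the buffers returned by ExtAudioFileRead: 32-bit float at the output sample rate, converted from the file format on read.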
    status = ::ExtAudioFileSetProperty( mExtAudioFile.get(), kExtAudioFileProperty_ClientDataFormat, sizeof( outputFormat ), &outputFormat );
    CI_VERIFY( status == noErr );

    // numFrames will be updated at read time
    mBufferList = createNonInterleavedBufferListShallow( mNumChannels );
}
Example #6
TargetFileMediaFoundation::TargetFileMediaFoundation( const DataTargetRef &dataTarget, size_t sampleRate, size_t numChannels, SampleType sampleType, const std::string &extension )
	: TargetFile( dataTarget, sampleRate, numChannels, sampleType ), mStreamIndex( 0 )
{
	MediaFoundationInitializer::initMediaFoundation();

	::IMFSinkWriter *sinkWriter;
	HRESULT hr = ::MFCreateSinkWriterFromURL( dataTarget->getFilePath().wstring().c_str(), NULL, NULL, &sinkWriter );
	if( hr != S_OK ) {
		string errorString = string( "TargetFileMediaFoundation: Failed to create SinkWriter from URL: " ) + dataTarget->getFilePath().string();
		if( hr == HRESULT_FROM_WIN32( ERROR_FILE_NOT_FOUND ) )
			errorString += ", file not found.";
		throw AudioFileExc( errorString );
	}

	mSinkWriter = ci::msw::makeComUnique( sinkWriter );

	mSampleSize = getBytesPerSample( mSampleType );
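	// Derived PCM layout values for the output media type; blockAlignment is the size of one frame in bytes.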
	const UINT32 bitsPerSample = 8 * mSampleSize;
	const WORD blockAlignment = mNumChannels * mSampleSize;
	const DWORD averageBytesPerSecond = mSampleRate * blockAlignment;

	// Set the output media type.

	IMFMediaType *mediaType;
	hr = ::MFCreateMediaType( &mediaType );
	CI_ASSERT( hr == S_OK );
	auto mediaTypeManaged = ci::msw::makeComUnique( mediaType );

	hr = mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
	CI_ASSERT( hr == S_OK );

	const ::GUID audioFormat = getMfAudioFormat( mSampleType );
	hr = mediaType->SetGUID( MF_MT_SUBTYPE, audioFormat );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, (UINT32)mSampleRate );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, blockAlignment );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, averageBytesPerSecond );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, (UINT32)mNumChannels );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, 1 );
	CI_ASSERT( hr == S_OK );

	hr = mediaType->SetUINT32( MF_MT_FIXED_SIZE_SAMPLES, 1 );
	CI_ASSERT( hr == S_OK );

	hr = mSinkWriter->AddStream( mediaType, &mStreamIndex );
	CI_ASSERT( hr == S_OK );

	// Tell the sink writer to start accepting data.
	hr = mSinkWriter->BeginWriting();
	CI_ASSERT( hr == S_OK );
}
Example #7
// TODO: test setting MF_LOW_LATENCY attribute
void SourceFileMediaFoundation::initReader()
{
	CI_ASSERT( mDataSource );
	mFramesRemainingInReadBuffer = 0;

	::IMFAttributes *attributes;
	HRESULT hr = ::MFCreateAttributes( &attributes, 1 );
	CI_ASSERT( hr == S_OK );
	auto attributesPtr = ci::msw::makeComUnique( attributes );

	::IMFSourceReader *sourceReader;

	if( mDataSource->isFilePath() ) {
		hr = ::MFCreateSourceReaderFromURL( mDataSource->getFilePath().wstring().c_str(), attributesPtr.get(), &sourceReader );
		if( hr != S_OK ) {
			string errorString = string( "SourceFileMediaFoundation: Failed to create SourceReader from URL: " ) + mDataSource->getFilePath().string();
			if( hr == HRESULT_FROM_WIN32( ERROR_FILE_NOT_FOUND ) )
				errorString += ", file not found.";
			throw AudioFileExc( errorString );
		}
	}
	else {
		mComIStream = ci::msw::makeComUnique( new ci::msw::ComIStream( mDataSource->createStream() ) );
		::IMFByteStream *byteStream;
		hr = ::MFCreateMFByteStreamOnStream( mComIStream.get(), &byteStream );
		CI_ASSERT( hr == S_OK );
		mByteStream = ci::msw::makeComUnique( byteStream );

		hr = ::MFCreateSourceReaderFromByteStream( byteStream, attributesPtr.get(), &sourceReader );
		if( hr != S_OK )
			throw AudioFileExc( "SourceFileMediaFoundation: Failed to create SourceReader from resource." );
	}

	mSourceReader = ci::msw::makeComUnique( sourceReader );

	// get the file's native format
	::IMFMediaType *nativeType;
	::WAVEFORMATEX *nativeFormat;
	UINT32 formatSize;
	hr = mSourceReader->GetNativeMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, &nativeType );
	CI_ASSERT( hr == S_OK );
	auto nativeTypeUnique = ci::msw::makeComUnique( nativeType );

	hr = ::MFCreateWaveFormatExFromMFMediaType( nativeType, &nativeFormat, &formatSize );
	CI_ASSERT( hr == S_OK );

	mNumChannels = nativeFormat->nChannels;
	mSampleRate = nativeFormat->nSamplesPerSec;

	GUID outputSubType = MFAudioFormat_PCM; // default to PCM 16-bit int, upgrade if we can.
	mSampleType = SampleType::INT_16;

	if( nativeFormat->wBitsPerSample == 32 ) {
		mSampleType = SampleType::FLOAT_32;
		outputSubType = MFAudioFormat_Float;
	}
	else if( nativeFormat->wBitsPerSample == 24 ) {
		mSampleType = SampleType::INT_24;
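		// Multichannel 24-bit data gets an intermediate conversion buffer, sized for one maximum-length read.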
		if( mNumChannels > 1 )
			mBitConverterBuffer.setSize( getMaxFramesPerRead(), mNumChannels );
	}

	::CoTaskMemFree( nativeFormat );

	mBytesPerSample = getBytesPerSample( mSampleType );
	mReadBuffer.setSize( getMaxFramesPerRead(), mNumChannels );

	// set output type, which loads the proper decoder:
	::IMFMediaType *outputType;
	hr = ::MFCreateMediaType( &outputType );
	CI_ASSERT( hr == S_OK );
	auto outputTypeRef = ci::msw::makeComUnique( outputType );
	hr = outputTypeRef->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
	CI_ASSERT( hr == S_OK );
	hr = outputTypeRef->SetGUID( MF_MT_SUBTYPE, outputSubType );
	CI_ASSERT( hr == S_OK );

	hr = mSourceReader->SetCurrentMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, outputTypeRef.get() );
	CI_ASSERT( hr == S_OK );

	// after the decoder is loaded, we have to now get the 'complete' output type before retrieving its format
	::IMFMediaType *completeOutputType;
	hr = mSourceReader->GetCurrentMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, &completeOutputType );
	CI_ASSERT( hr == S_OK );
	auto completeOutputTypeRef = ci::msw::makeComUnique( completeOutputType ); // ensure the returned media type is released

	::WAVEFORMATEX *format;
	hr = ::MFCreateWaveFormatExFromMFMediaType( completeOutputType, &format, &formatSize );
	CI_ASSERT( hr == S_OK );
	::CoTaskMemFree( format );

	// get seconds:
	::PROPVARIANT durationProp;
	hr = mSourceReader->GetPresentationAttribute( MF_SOURCE_READER_MEDIASOURCE, MF_PD_DURATION, &durationProp );
	CI_ASSERT( hr == S_OK );
	LONGLONG duration = durationProp.uhVal.QuadPart;
	
	mSeconds = nanoSecondsToSeconds( duration );
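	// Derive the frame count from the reported duration and the native sample rate.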
	mNumFrames = mFileNumFrames = size_t( mSeconds * (double)mSampleRate );

	::PROPVARIANT seekProp;
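	// Query the media source characteristics to determine whether seeking is supported.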
	hr = mSourceReader->GetPresentationAttribute( MF_SOURCE_READER_MEDIASOURCE, MF_SOURCE_READER_MEDIASOURCE_CHARACTERISTICS, &seekProp );
	CI_ASSERT( hr == S_OK );
	ULONG flags = seekProp.ulVal;
	mCanSeek = ( ( flags & MFMEDIASOURCE_CAN_SEEK ) == MFMEDIASOURCE_CAN_SEEK );
}