Example 1
0
// static 
int SourceFileOggVorbis::seekFn( void *datasource, ogg_int64_t offset, int whence )
{
	// Seek callback for libvorbisfile's ov_callbacks. Must follow fseek() semantics:
	// reposition the stream by `offset` relative to `whence`, return 0 on success,
	// -1 on failure. `datasource` is the SourceFileOggVorbis* registered at open time.
	auto sourceFile = (SourceFileOggVorbis *)datasource;

	switch( whence ) {
		case SEEK_SET:
			sourceFile->mStream->seekAbsolute( (off_t)offset );
			break;
		case SEEK_CUR:
			sourceFile->mStream->seekRelative( (off_t)offset );
			break;
		case SEEK_END:
			// FIX: honor `offset` relative to end-of-stream (it is typically 0 or
			// negative); previously the offset was silently ignored.
			sourceFile->mStream->seekAbsolute( (off_t)( sourceFile->mStream->size() + offset ) );
			break;
		default:
			CI_ASSERT_NOT_REACHABLE();
			return -1;
	}

	return 0;
}
Example 2
0
void TargetFileMediaFoundation::performWrite( const Buffer *buffer, size_t numFrames, size_t frameOffset )
{
	// Encodes `numFrames` frames of de-interleaved float audio from `buffer` into an
	// IMFSample (interleaving / bit-depth converting as needed for the target format)
	// and submits it to the sink writer. `frameOffset` is the running frame position,
	// used to timestamp the sample.

	// create media sample
	::IMFSample *mediaSample;
	HRESULT hr = ::MFCreateSample( &mediaSample );
	CI_ASSERT( hr == S_OK );

	auto samplePtr = ci::msw::makeComUnique( mediaSample );

	double lengthSeconds = (double)numFrames / (double)mSampleRate;
	const LONGLONG sampleDuration = secondsToNanoSeconds( lengthSeconds );
	hr = mediaSample->SetSampleDuration( sampleDuration );
	CI_ASSERT( hr == S_OK );

	double currentTimeSeconds = (double)frameOffset / (double)mSampleRate;
	const LONGLONG sampleTime = secondsToNanoSeconds( currentTimeSeconds );
	hr = mediaSample->SetSampleTime( sampleTime );
	CI_ASSERT( hr == S_OK );

	// create media buffer and fill with audio samples.

	DWORD bufferSizeBytes = numFrames * buffer->getNumChannels() * mSampleSize;
	::IMFMediaBuffer *mediaBuffer;
	hr = ::MFCreateMemoryBuffer( bufferSizeBytes, &mediaBuffer );
	CI_ASSERT( hr == S_OK );

	// FIX: release our reference when done; AddBuffer() below takes its own reference,
	// so without this the buffer leaked one COM reference per write.
	auto bufferPtr = ci::msw::makeComUnique( mediaBuffer );

	hr = mediaBuffer->SetCurrentLength( bufferSizeBytes );
	CI_ASSERT( hr == S_OK );

	hr = mediaSample->AddBuffer( mediaBuffer );
	CI_ASSERT( hr == S_OK );

	BYTE *audioData;
	hr = mediaBuffer->Lock( &audioData, NULL, NULL );
	CI_ASSERT( hr == S_OK );

	if( mSampleType == SampleType::FLOAT_32 ) {
		float *destFloatSamples = (float *)audioData;
		if( mNumChannels == 1 )
			memcpy( destFloatSamples, buffer->getData(), numFrames * mSampleSize ); // mono: already interleaved
		else
			dsp::interleave( buffer->getData(), destFloatSamples, buffer->getNumFrames(), mNumChannels, numFrames );
	}
	else if( mSampleType == SampleType::INT_16 ) {
		int16_t *destInt16Samples = (int16_t *)audioData;
		dsp::interleave( buffer->getData(), destInt16Samples, buffer->getNumFrames(), mNumChannels, numFrames );
	}
	else if( mSampleType == SampleType::INT_24 ) {
		char *destInt24Samples = (char *)audioData;
		if( mNumChannels == 1 )
			dsp::convertFloatToInt24( buffer->getData(), destInt24Samples, numFrames );
		else {
			// interleave into a scratch buffer first, then convert to packed 24-bit
			if( mBitConverterBuffer.getNumFrames() != numFrames || mBitConverterBuffer.getNumChannels() != mNumChannels  )
				mBitConverterBuffer.setSize( numFrames, mNumChannels );

			dsp::interleaveBuffer( buffer, &mBitConverterBuffer );
			dsp::convertFloatToInt24( mBitConverterBuffer.getData(), destInt24Samples, numFrames * mNumChannels );
		}
	}
	else
		CI_ASSERT_NOT_REACHABLE();

	hr = mediaBuffer->Unlock();
	CI_ASSERT( hr == S_OK );

	hr = mSinkWriter->WriteSample( mStreamIndex, mediaSample );
	CI_ASSERT( hr == S_OK );
}
Example 3
0
size_t SourceFileMediaFoundation::processNextReadSample()
{
	// Pulls the next decoded sample from the source reader, de-interleaves /
	// converts it to float into mReadBuffer, and returns the number of frames
	// read (0 at end of stream, on a media-type change, or when no sample is
	// available).
	::IMFSample *mediaSample;
	DWORD streamFlags = 0;
	LONGLONG timeStamp;
	HRESULT hr = mSourceReader->ReadSample( MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, NULL, &streamFlags, &timeStamp, &mediaSample );
	CI_ASSERT( hr == S_OK );

	// Take ownership immediately so the sample (when present) is released on
	// every return path, including the early-outs below.
	auto samplePtr = ci::msw::makeComUnique( mediaSample );

	if( streamFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED ) {
		CI_LOG_W( "type change unhandled" );
		return 0;
	}
	if( streamFlags & MF_SOURCE_READERF_ENDOFSTREAM ) {
		// end of file
		return 0;
	}
	if( ! mediaSample ) {
		// out of samples
		// FIX: previously this branch called Release() on the null pointer.
		return 0;
	}

	DWORD bufferCount;
	hr = samplePtr->GetBufferCount( &bufferCount );
	CI_ASSERT( hr == S_OK );

	CI_ASSERT( bufferCount == 1 ); // just looking out for a file type with more than one buffer.. haven't seen one yet.

	// get the buffer
	::IMFMediaBuffer *mediaBuffer;
	BYTE *audioData = NULL;
	DWORD audioDataLength;

	hr = samplePtr->ConvertToContiguousBuffer( &mediaBuffer );
	CI_ASSERT( hr == S_OK );

	// release our buffer reference on all paths (consistent with samplePtr above)
	auto bufferPtr = ci::msw::makeComUnique( mediaBuffer );

	hr = mediaBuffer->Lock( &audioData, NULL, &audioDataLength );
	CI_ASSERT( hr == S_OK );

	size_t numChannels = mNumChannels;
	size_t numFramesRead = audioDataLength / ( mBytesPerSample * numChannels );

	mReadBuffer.setNumFrames( numFramesRead );

	if( mSampleType == SampleType::FLOAT_32 ) {
		float *sourceFloatSamples = (float *)audioData;
		if( numChannels == 1 )
			memcpy( mReadBuffer.getData(), sourceFloatSamples, numFramesRead * sizeof( float ) ); // mono: no de-interleave needed
		else
			dsp::deinterleave( sourceFloatSamples, mReadBuffer.getData(), mReadBuffer.getNumFrames(), numChannels, numFramesRead );
	}
	else if( mSampleType == SampleType::INT_16 ) {
		int16_t *sourceInt16Samples = (int16_t *)audioData;
		dsp::deinterleave( sourceInt16Samples, mReadBuffer.getData(), mReadBuffer.getNumFrames(), numChannels, numFramesRead );
	}
	else if( mSampleType == SampleType::INT_24 ) {
		const char *sourceInt24Samples = (const char *)audioData;
		if( numChannels == 1 )
			dsp::convertInt24ToFloat( sourceInt24Samples, mReadBuffer.getData(), numFramesRead );
		else {
			// convert packed 24-bit to float in a scratch buffer, then de-interleave
			if( mBitConverterBuffer.getNumFrames() != numFramesRead )
				mBitConverterBuffer.setNumFrames( numFramesRead );

			dsp::convertInt24ToFloat( sourceInt24Samples, mBitConverterBuffer.getData(), numFramesRead * numChannels );
			dsp::deinterleave( mBitConverterBuffer.getData(), mReadBuffer.getData(), mReadBuffer.getNumFrames(), numChannels, numFramesRead );
		}
	}
	else
		CI_ASSERT_NOT_REACHABLE();

	hr = mediaBuffer->Unlock();
	CI_ASSERT( hr == S_OK );

	return numFramesRead;
}