void ContextXAudio::initXAudio2()
{
#if defined( CINDER_XAUDIO_2_7 )
	UINT32 flags = XAUDIO2_DEBUG_ENGINE;

	::CoInitializeEx( NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE );

#else
	UINT32 flags = 0;
#endif

	HRESULT hr = ::XAudio2Create( &mXAudio, flags, XAUDIO2_DEFAULT_PROCESSOR );
	CI_ASSERT( hr == S_OK );
	hr = mXAudio->RegisterForCallbacks( mEngineCallback.get() );
	CI_ASSERT( hr == S_OK );

#if defined( CINDER_XAUDIO_2_8 )
	::XAUDIO2_DEBUG_CONFIGURATION debugConfig = {0};
	//debugConfig.TraceMask = XAUDIO2_LOG_ERRORS;
	//debugConfig.BreakMask = XAUDIO2_LOG_ERRORS;
	debugConfig.TraceMask = XAUDIO2_LOG_WARNINGS;
	debugConfig.BreakMask = XAUDIO2_LOG_WARNINGS;
	debugConfig.LogFunctionName = true;
	mXAudio->SetDebugConfiguration( &debugConfig );
#endif
}
Example #2
void SourceFileImplOggVorbis::init()
{
	CI_ASSERT( mDataSource );
	if( mDataSource->isFilePath() ) {
		int status = ov_fopen( mDataSource->getFilePath().string().c_str(), &mOggVorbisFile );
		if( status )
			throw AudioFileExc( string( "Failed to open Ogg Vorbis file with error: " ), (int32_t)status );
	}
	else {
		mStream = mDataSource->createStream();

		ov_callbacks callbacks;
		callbacks.read_func = readFn;
		callbacks.seek_func = seekFn;
		callbacks.close_func = closeFn;
		callbacks.tell_func = tellFn;

		int status = ov_open_callbacks( this, &mOggVorbisFile, NULL, 0, callbacks );
		CI_ASSERT( status == 0 );
	}

	vorbis_info *info = ov_info( &mOggVorbisFile, -1 );
	mSampleRate = mNativeSampleRate = info->rate;
	mNumChannels = mNativeNumChannels = info->channels;

	ogg_int64_t totalFrames = ov_pcm_total( &mOggVorbisFile, -1 );
	mNumFrames = mFileNumFrames = static_cast<uint32_t>( totalFrames );
}
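The four function pointers assigned above must match libvorbisfile's ov_callbacks signatures. Below is a minimal sketch of what the forwarding functions could look like, assuming they are free functions and that the datasource pointer is the SourceFileImplOggVorbis instance passed to ov_open_callbacks; the bodies are placeholders, not the actual implementation.

// Signatures are dictated by <vorbis/vorbisfile.h>; bodies are illustrative only.
size_t readFn( void *ptr, size_t size, size_t count, void *userData )
{
	auto sourceFile = static_cast<SourceFileImplOggVorbis *>( userData );
	// copy up to size * count bytes from the wrapped stream into ptr and
	// return the number of bytes actually read (0 signals end of stream)
	return 0;
}

int seekFn( void *userData, ogg_int64_t offset, int whence )
{
	// whence is SEEK_SET, SEEK_CUR or SEEK_END; return 0 on success, -1 if the stream cannot seek
	return -1;
}

int closeFn( void *userData )
{
	return 0; // the stream is owned by the data source, so there is nothing to close here
}

long tellFn( void *userData )
{
	// return the current byte position within the stream
	return 0;
}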
Example #3
void Layer::init()
{
	CI_ASSERT( ! mShouldRemove ); // TODO: can probably remove this as things changed a bit
	CI_ASSERT( mRootView->mLayer == shared_from_this() );

	LOG_LAYER( "mRootView: " << mRootView->getName() );

	if( mRootView->isTransparent() ) {
		if( ! mRootView->mRendersToFrameBuffer ) {
			LOG_LAYER( "enabling FrameBuffer for view '" << mRootView->getName() << "', size: " << mRootView->getSize() );
			LOG_LAYER( "\t- reason: alpha = " << mRootView->getAlpha() );
			mRootView->mRendersToFrameBuffer = true;
		}
	}
	else {
		if( mFrameBuffer ) {
			// TODO: Consider removing, this path currently isn't reached as the Layer will be removed when View calls Graph::removeLayer().
			LOG_LAYER( "removing FrameBuffer for view '" << mRootView->getName() << "'" );
			LOG_LAYER( "\t- reason: alpha = " << mRootView->getAlpha() );
			mFrameBuffer.reset();
			mRootView->mRendersToFrameBuffer = false;
			mFrameBufferBounds = Rectf::zero();
			mGraph->removeLayer( shared_from_this() );
			return;
		}
	}
}
void ContextXAudio::initMasteringVoice()
{
	// create the IXAudio2MasteringVoice that represents the default hardware device.
	HRESULT hr = mXAudio->CreateMasteringVoice( &mMasteringVoice );
	CI_ASSERT( hr == S_OK );
	CI_ASSERT( mMasteringVoice );
}
void OutputDeviceNodeXAudio::initSourceVoice()
{
	CI_ASSERT( ! mSourceVoice );

	auto context = dynamic_pointer_cast<ContextXAudio>( getContext() );

	auto wfx = msw::interleavedFloatWaveFormat( getSampleRate(), getNumChannels() );

	IXAudio2 *xaudio = context->getXAudio();
	UINT32 flags = ( mFilterEnabled ? XAUDIO2_VOICE_USEFILTER : 0 );
	HRESULT hr = xaudio->CreateSourceVoice( &mSourceVoice, wfx.get(), flags, XAUDIO2_DEFAULT_FREQ_RATIO, mVoiceCallback.get() );
	CI_ASSERT( hr == S_OK );
}
size_t SourceFileMediaFoundation::performRead( Buffer *buffer, size_t bufferFrameOffset, size_t numFramesNeeded )
{
	CI_ASSERT( buffer->getNumFrames() >= bufferFrameOffset + numFramesNeeded );

	size_t readCount = 0;
	while( readCount < numFramesNeeded ) {

		// first drain any frames that were previously read from an IMFSample
		if( mFramesRemainingInReadBuffer ) {
			size_t remainingToDrain = std::min( mFramesRemainingInReadBuffer, numFramesNeeded );

			// TODO: use Buffer::copyChannel
			for( size_t ch = 0; ch < mNumChannels; ch++ ) {
				float *readChannel = mReadBuffer.getChannel( ch ) + mReadBufferPos;
				float *resultChannel = buffer->getChannel( ch );
				memcpy( resultChannel + bufferFrameOffset + readCount, readChannel, remainingToDrain * sizeof( float ) );
			}

			mReadBufferPos += remainingToDrain;
			mFramesRemainingInReadBuffer -= remainingToDrain;
			readCount += remainingToDrain;
			continue;
		}

		CI_ASSERT( ! mFramesRemainingInReadBuffer );

		mReadBufferPos = 0;
		size_t outNumFrames = processNextReadSample();
		if( ! outNumFrames )
			break;

		// if the IMFSample contained more frames than were asked for,
		// record how many are left over and consume only what was requested.
		if( outNumFrames + readCount > numFramesNeeded ) {
			mFramesRemainingInReadBuffer = outNumFrames + readCount - numFramesNeeded;
			outNumFrames = numFramesNeeded - readCount;
		}

		size_t offset = bufferFrameOffset + readCount;
		for( size_t ch = 0; ch < mNumChannels; ch++ ) {
			float *readChannel = mReadBuffer.getChannel( ch );
			float *resultChannel = buffer->getChannel( ch );
			memcpy( resultChannel + offset, readChannel, outNumFrames * sizeof( float ) );
		}

		mReadBufferPos += outNumFrames;
		readCount += outNumFrames;
	}

	return readCount;
}
void ProjectManagerApp::fileDrop(FileDropEvent event)
{
	StringArray drops;

	for (const auto &filePath : event.getFiles())
	{
		std::string path = filePath.string();

		File file(path);
		if (file.exists())
		{
			drops.addIfNotAlreadyThere(path);
		}
	}

	CI_ASSERT(drops.size() == 1);

	int count = handler.processCinderRoot(*drops.begin());
	if (count < 0)
	{
		gui->postWarningMessage("Fatal error! ", "Dropped folder is not Cinder root folder");
	}
	else
	{
		String msg = String(count) + " vc2015 projects created!";
		gui->postInfoMessage("Done ", msg.toStdString());
	}
}
Example #8
void sumBuffers( const Buffer *sourceBuffer, Buffer *destBuffer, size_t numFrames )
{
	size_t sourceChannels = sourceBuffer->getNumChannels();
	size_t destChannels = destBuffer->getNumChannels();

	if( destChannels == sourceChannels ) {
		for( size_t c = 0; c < destChannels; c++ )
			add( destBuffer->getChannel( c ), sourceBuffer->getChannel( c ), destBuffer->getChannel( c ), numFrames );
	}
	else if( sourceChannels == 1 ) {
		// up-mix mono sourceBuffer to destChannels
		const float *sourceChannel0 = sourceBuffer->getChannel( 0 );
		for( size_t c = 0; c < destChannels; c++ )
			add( destBuffer->getChannel( c ), sourceChannel0, destBuffer->getChannel( c ), numFrames );
	}
	else if( destChannels == 1 ) {
		// down-mix sourceBuffer's channels into the mono destBuffer, multiplying by an equal-power normalizer to help prevent clipping
		const float downMixNormalizer = 1.0f / std::sqrt( 2.0f );
		float *destChannel0 = destBuffer->getChannel( 0 );
		for( size_t c = 0; c < sourceChannels; c++ )
			addMul( destChannel0, sourceBuffer->getChannel( c ), downMixNormalizer, destChannel0, numFrames );
	}
	else
		CI_ASSERT( 0 && "unhandled" );
}
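For reference, a short usage sketch of the mono up-mix branch above, assuming cinder::audio::Buffer and that sumBuffers is visible in the current namespace as in the snippet; the buffer sizes and test value are arbitrary.

Buffer mono( 512, 1 );    // 512 frames, 1 channel
Buffer stereo( 512, 2 );  // destination, 2 channels

// fill the mono source with a test value
for( size_t i = 0; i < mono.getNumFrames(); i++ )
	mono.getChannel( 0 )[i] = 0.5f;

// takes the sourceChannels == 1 branch: the single source channel is added into both destination channels
sumBuffers( &mono, &stereo, stereo.getNumFrames() );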
TargetFileMediaFoundation::~TargetFileMediaFoundation()
{
	if( mSinkWriter ) {
		HRESULT hr = mSinkWriter->Finalize();
		CI_ASSERT( hr == S_OK );
	}
}
Example #10
ScopedRenderbuffer::ScopedRenderbuffer( GLenum target, GLuint id )
	: mCtx( gl::context() )
{
	// this is the only legal value currently
	CI_ASSERT( target == GL_RENDERBUFFER );
	mCtx->pushRenderbufferBinding( target, id );
}
Example #11
size_t SourceFileOggVorbis::performRead( Buffer *buffer, size_t bufferFrameOffset, size_t numFramesNeeded )
{
	CI_ASSERT( buffer->getNumFrames() >= bufferFrameOffset + numFramesNeeded );

	size_t readCount = 0;
	while( readCount < numFramesNeeded ) {
		float **outChannels;
		int section;

		long outNumFrames = ov_read_float( &mOggVorbisFile, &outChannels, int( numFramesNeeded - readCount ), &section );
		if( outNumFrames <= 0 ) {
			if( outNumFrames < 0 )
				throw AudioFileExc( "ov_read_float error", (int32_t)outNumFrames );

			break;
		}

		size_t offset = bufferFrameOffset + readCount;
		for( size_t ch = 0; ch < mNumChannels; ch++ )
			memcpy( buffer->getChannel( ch ) + offset, outChannels[ch], outNumFrames * sizeof( float ) );

		readCount += outNumFrames;
	}

	return static_cast<size_t>( readCount );
}
Example #12
void Context::addSoftBody( btSoftBody *body, int16_t collisionGroup, int16_t collisionMask )
{
	CI_ASSERT(mWorld->getWorldType() == BT_SOFT_RIGID_DYNAMICS_WORLD);
	btSoftRigidDynamicsWorld* softWorld = static_cast<btSoftRigidDynamicsWorld*>(mWorld);
	if( softWorld )
		softWorld->addSoftBody( body, collisionGroup, collisionMask );
}
shared_ptr<OutputDeviceNodeXAudio> ContextXAudio::getOutputDeviceNodeXAudio() const
{
	auto result = dynamic_pointer_cast<OutputDeviceNodeXAudio>( mOutput );
	CI_ASSERT( result );

	return result;
}
Example #14
File: Node.cpp Project: cinder/cinder
void Node::pullInputs( Buffer *inPlaceBuffer )
{
	CI_ASSERT( getContext() );

	if( mProcessInPlace ) {
		if( mInputs.empty() ) {
			// Fastest route: no inputs and process in-place. inPlaceBuffer must be cleared so that samples left over
			// from InputNodes that aren't filling the entire buffer are zero.
			inPlaceBuffer->zero();
			if( mEnabled )
				process( inPlaceBuffer );
		}
		else {
			// First pull the input (can only be one when in-place), then run process() if input did any processing.
			const NodeRef &input = *mInputs.begin();
			input->pullInputs( inPlaceBuffer );

			if( ! input->getProcessesInPlace() )
				dsp::mixBuffers( input->getInternalBuffer(), inPlaceBuffer );

			if( mEnabled )
				process( inPlaceBuffer );
		}
	}
	else {
		// Pull and sum all enabled inputs. Only do this once per processing block, which is checked by the current number of processed frames.
		uint64_t numProcessedFrames = getContext()->getNumProcessedFrames();
		if( mLastProcessedFrame != numProcessedFrames ) {
			mLastProcessedFrame = numProcessedFrames;

			mSummingBuffer.zero();
			sumInputs();
		}
	}
}
Example #15
void Context::removeSoftBody( btSoftBody *body )
{
	CI_ASSERT(mWorld->getWorldType() == BT_SOFT_RIGID_DYNAMICS_WORLD);
	btSoftRigidDynamicsWorld* softWorld = static_cast<btSoftRigidDynamicsWorld*>(mWorld);
	if( softWorld )
		softWorld->removeSoftBody( body );
}
Example #16
TargetFileOggVorbis::TargetFileOggVorbis( const DataTargetRef &dataTarget, size_t sampleRate, size_t numChannels, SampleType sampleType )
	: cinder::audio::TargetFile( sampleRate, numChannels, sampleType ), mDataTarget( dataTarget )
{
	CI_ASSERT( mDataTarget );
	mStream = mDataTarget->getStream();

	vorbis_info_init( &mVorbisInfo );

	auto status = vorbis_encode_init_vbr( &mVorbisInfo, getNumChannels(), getSampleRate(), mVorbisBaseQuality );
	if ( status ) {
		throw AudioFormatExc( string( "TargetFileOggVorbis: invalid quality setting." ) );
	}

	vorbis_comment_init( &mVorbisComment );
	vorbis_comment_add_tag( &mVorbisComment, "ENCODER", "libcinder" );

	vorbis_analysis_init( &mVorbisDspState, &mVorbisInfo );
	vorbis_block_init( &mVorbisDspState, &mVorbisBlock );

	// a constant stream serial number is used; this is okay since no streams are multiplexed
	ogg_stream_init( &mOggStream, 0 );

	ogg_packet header, headerComment, headerCodebook;

	vorbis_analysis_headerout( &mVorbisDspState, &mVorbisComment, &header, &headerComment, &headerCodebook );
	ogg_stream_packetin( &mOggStream, &header );
	ogg_stream_packetin( &mOggStream, &headerComment );
	ogg_stream_packetin( &mOggStream, &headerCodebook );

	// flush ogg page so audio data starts on a new page
	while ( ogg_stream_flush( &mOggStream, &mOggPage ) != 0 ) {
		mStream->writeData( mOggPage.header, mOggPage.header_len );
		mStream->writeData( mOggPage.body, mOggPage.body_len );
	}
}
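The constructor above only emits the Vorbis header packets. The following is a minimal sketch of the matching write path, following the standard libvorbis analysis loop and reusing the members initialized above; the function name and its parameters are hypothetical.

// Illustrative only: submit numFrames of non-interleaved float samples (one pointer per channel)
// to the encoder, then write any completed Ogg pages to the stream.
void writeVorbisFrames( const float *const *channels, size_t numFrames, size_t numChannels )
{
	float **encodeBuffer = vorbis_analysis_buffer( &mVorbisDspState, (int)numFrames );
	for( size_t ch = 0; ch < numChannels; ch++ )
		memcpy( encodeBuffer[ch], channels[ch], numFrames * sizeof( float ) );

	vorbis_analysis_wrote( &mVorbisDspState, (int)numFrames );

	while( vorbis_analysis_blockout( &mVorbisDspState, &mVorbisBlock ) == 1 ) {
		vorbis_analysis( &mVorbisBlock, NULL );
		vorbis_bitrate_addblock( &mVorbisBlock );

		ogg_packet packet;
		while( vorbis_bitrate_flushpacket( &mVorbisDspState, &packet ) ) {
			ogg_stream_packetin( &mOggStream, &packet );

			while( ogg_stream_pageout( &mOggStream, &mOggPage ) != 0 ) {
				mStream->writeData( mOggPage.header, mOggPage.header_len );
				mStream->writeData( mOggPage.body, mOggPage.body_len );
			}
		}
	}
}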
void ContextXAudio::enable()
{
	HRESULT hr = mXAudio->StartEngine();
	CI_ASSERT( hr == S_OK );

	Context::enable();
}
void OutputDeviceNodeXAudio::submitNextBuffer()
{
	auto ctx = getContext();
	if( ! ctx )
		return;

	lock_guard<mutex> lock( ctx->getMutex() );

	// verify context still exists, since its destructor may have been holding the lock
	ctx = getContext();
	if( ! ctx )
		return;

	ctx->preProcess();

	auto internalBuffer = getInternalBuffer();
	internalBuffer->zero();
	pullInputs( internalBuffer );

	if( checkNotClipping() )
		internalBuffer->zero();

	if( getNumChannels() == 2 )
		dsp::interleaveStereoBuffer( internalBuffer, &mBufferInterleaved );

	HRESULT hr = mSourceVoice->SubmitSourceBuffer( &mXAudioBuffer );
	CI_ASSERT( hr == S_OK );

	ctx->postProcess();
}
Example #19
// static
void MediaFoundationInitializer::shutdownMediaFoundation()
{
	if( sIsMfInitialized ) {
		sIsMfInitialized = false;
		HRESULT hr = ::MFShutdown();
		CI_ASSERT( hr == S_OK );
	}
}
Example #20
// static
void MediaFoundationInitializer::initMediaFoundation()
{
	if( ! sIsMfInitialized ) {
		sIsMfInitialized = true;
		HRESULT hr = ::MFStartup( MF_VERSION );
		CI_ASSERT( hr == S_OK );
	}
}
Example #21
void SelectorBase::select( size_t index )
{
	CI_ASSERT( index < mSegments.size() );

	if( mSelectedIndex != index ) {
		mSelectedIndex = index;
		getSignalValueChanged().emit();
	}
}
Example #22
File: AppLinux.cpp Project: Ahbee/Cinder
AppLinux::AppLinux()
	: AppBase()
{
	const Settings *settings = dynamic_cast<Settings *>( sSettingsFromMain );
	CI_ASSERT( settings );

	enablePowerManagement( settings->isPowerManagementEnabled() ); // TODO: consider moving to common method

	mImpl = new AppImplLinux( this, *settings );
}
Example #23
File: Node.cpp Project: cinder/cinder
void Node::setupProcessWithSumming()
{
	CI_ASSERT( getContext() );

	mProcessInPlace = false;
	size_t framesPerBlock = getFramesPerBlock();

	mInternalBuffer.setSize( framesPerBlock, mNumChannels );
	mSummingBuffer.setSize( framesPerBlock, mNumChannels );
}
Example #24
// Note that in the following methods, sourceBuffer may have fewer frames than mBufferd; this is common at EOF. That's okay, but make sure readCount reflects this.
pair<size_t, size_t> ConverterImplR8brain::convert( const Buffer *sourceBuffer, Buffer *destBuffer )
{
	CI_ASSERT( sourceBuffer->getNumChannels() == mSourceNumChannels && destBuffer->getNumChannels() == mDestNumChannels );

	int readCount = (int)min( sourceBuffer->getNumFrames(), mSourceMaxFramesPerBlock );

	// debug ensure that destBuffer is large enough
	CI_ASSERT( destBuffer->getNumFrames() >= ( readCount * (float)mDestSampleRate / (float)mSourceSampleRate ) );

	if( mSourceSampleRate == mDestSampleRate ) {
		mixBuffers( sourceBuffer, destBuffer, readCount );
		return make_pair( readCount, readCount );
	}
	else if( mSourceNumChannels == mDestNumChannels )
		return convertImpl( sourceBuffer, destBuffer, readCount );
	else if( mSourceNumChannels > mDestNumChannels )
		return convertImplDownMixing( sourceBuffer, destBuffer, readCount );

	return convertImplUpMixing( sourceBuffer, destBuffer, readCount );
}
Example #25
AppMsw::AppMsw()
{
	sInstance = this;

	const Settings *settings = dynamic_cast<Settings *>( sSettingsFromMain );
	CI_ASSERT( settings );

	// pull out app-level variables
	enablePowerManagement( settings->isPowerManagementEnabled() ); // TODO: consider moving to common method

	mImpl.reset( new AppImplMswBasic( this, *settings ) );
}
Example #26
Converter::Converter( size_t sourceSampleRate, size_t destSampleRate, size_t sourceNumChannels, size_t destNumChannels, size_t sourceMaxFramesPerBlock )
	: mSourceSampleRate( sourceSampleRate ), mDestSampleRate( destSampleRate ), mSourceNumChannels( sourceNumChannels ), mDestNumChannels( destNumChannels ), mSourceMaxFramesPerBlock( sourceMaxFramesPerBlock )
{
	CI_ASSERT( mSourceSampleRate && mSourceNumChannels && mSourceMaxFramesPerBlock );

	if( ! mDestSampleRate )
		mDestSampleRate = mSourceSampleRate;
	if( ! mDestNumChannels )
		mDestNumChannels = mSourceNumChannels;

	mDestMaxFramesPerBlock = (size_t)ceil( (float)mSourceMaxFramesPerBlock * (float)mDestSampleRate / (float)mSourceSampleRate );
}
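As a concrete check of the destination block-size calculation above, assume a 44.1 kHz source converted to 48 kHz with 512-frame source blocks:

size_t sourceMaxFrames = 512;
size_t sourceRate = 44100, destRate = 48000;
size_t destMaxFrames = (size_t)ceil( (float)sourceMaxFrames * (float)destRate / (float)sourceRate );
// 512 * 48000 / 44100 ≈ 557.28, so destMaxFrames == 558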
Example #27
void SourceFileMediaFoundation::performSeek( size_t readPositionFrames )
{
	if( ! mCanSeek )
		return;

	mReadBufferPos = mFramesRemainingInReadBuffer = 0;

	double positionSeconds = (double)readPositionFrames / (double)mSampleRate;
	if( positionSeconds > mSeconds ) {
		// don't attempt seek beyond bounds
		return;
	}

	LONGLONG position = secondsToNanoSeconds( positionSeconds );
	PROPVARIANT seekVar;
	HRESULT hr = ::InitPropVariantFromInt64( position, &seekVar );
	CI_ASSERT( hr == S_OK );
	hr = mSourceReader->SetCurrentPosition( GUID_NULL, seekVar );
	CI_ASSERT( hr == S_OK );
	hr = PropVariantClear( &seekVar );
	CI_ASSERT( hr == S_OK );
}
Example #28
size_t SourceFile::read( Buffer *buffer )
{
	CI_ASSERT( buffer->getNumChannels() == getNumChannels() );
	CI_ASSERT( mReadPos < mNumFrames );

	size_t numRead;

	if( mConverter ) {
		size_t sourceBufFrames = size_t( buffer->getNumFrames() * (float)getSampleRateNative() / (float)getSampleRate() );
		size_t numFramesNeeded = std::min( mFileNumFrames - mReadPos, std::min( getMaxFramesPerRead(), sourceBufFrames ) );

		mConverterReadBuffer.setNumFrames( numFramesNeeded );
		performRead( &mConverterReadBuffer, 0, numFramesNeeded );
		pair<size_t, size_t> count = mConverter->convert( &mConverterReadBuffer, buffer );
		numRead = count.second;
	}
	else {
		size_t numFramesNeeded = std::min( mNumFrames - mReadPos, std::min( getMaxFramesPerRead(), buffer->getNumFrames() ) );
		numRead = performRead( buffer, 0, numFramesNeeded );
	}

	mReadPos += numRead;
	return numRead;
}
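A short sketch of driving read() from client code, assuming a SourceFileRef obtained via audio::load() and a block-sized Buffer; the asset name is hypothetical.

auto source = audio::load( app::loadAsset( "example.wav" ) );

audio::Buffer block( source->getMaxFramesPerRead(), source->getNumChannels() );
size_t totalFramesRead = 0;
while( totalFramesRead < source->getNumFrames() ) {
	size_t framesRead = source->read( &block );
	if( ! framesRead )
		break;
	// consume the first framesRead frames of block here
	totalFramesRead += framesRead;
}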
Example #29
OutputDeviceNode::OutputDeviceNode( const DeviceRef &device, const Format &format )
	: OutputNode( format ), mDevice( device )
{
	CI_ASSERT( mDevice );

	// listen to the notifications sent by device property changes in order to update the audio graph.
	mWillChangeConn = mDevice->getSignalParamsWillChange().connect( bind( &OutputDeviceNode::deviceParamsWillChange, this ) );
	mDidChangeConn = mDevice->getSignalParamsDidChange().connect( bind( &OutputDeviceNode::deviceParamsDidChange, this ) );

	size_t deviceNumChannels = mDevice->getNumOutputChannels();

	// If number of channels hasn't been specified, default to 2 (or 1 if that is all that is available).
	if( getChannelMode() != ChannelMode::SPECIFIED ) {
		setChannelMode( ChannelMode::SPECIFIED );
		setNumChannels( std::min( deviceNumChannels, (size_t)2 ) );
	}

	// Double check the device has enough channels to support what was requested, which may not be the case if the user asked for more than what is available.
	if( deviceNumChannels < getNumChannels() )
		throw AudioFormatExc( string( "Device can not accommodate " ) + to_string( deviceNumChannels ) + " output channels." );
}
Example #30
BufferRef SourceFile::loadBuffer()
{
	seek( 0 );

	BufferRef result = make_shared<Buffer>( mNumFrames, getNumChannels() );

	if( mConverter ) {
		// TODO: need BufferView's in order to reduce number of copies
		Buffer converterDestBuffer( mConverter->getDestMaxFramesPerBlock(), getNumChannels() );
		size_t readCount = 0;
		while( true ) {
			size_t framesNeeded = min( getMaxFramesPerRead(), mFileNumFrames - readCount );
			if( framesNeeded == 0 )
				break;

			// make the source buffer's frame count match framesNeeded so that the Converter doesn't think it has more
			if( framesNeeded != mConverterReadBuffer.getNumFrames() )
				mConverterReadBuffer.setNumFrames( framesNeeded );

			size_t outNumFrames = performRead( &mConverterReadBuffer, 0, framesNeeded );
			CI_ASSERT( outNumFrames == framesNeeded );

			pair<size_t, size_t> count = mConverter->convert( &mConverterReadBuffer, &converterDestBuffer );
			result->copyOffset( converterDestBuffer, count.second, mReadPos, 0 );

			readCount += outNumFrames;
			mReadPos += count.second;
		}
	}
	else {
		size_t readCount = performRead( result.get(), 0, mNumFrames );
		mReadPos = readCount;
	}

	return result;
}