Example #1
void DeviceTestApp::setupNoise()
{
    // Create a white-noise generator; makeNode() associates it with the master Context.
    mGen = audio::master()->makeNode( new audio::GenNoiseNode() );

    mGen->connect( mGain );
    mGen->enable();
}
Example #2
void DeviceTestApp::setupSine()
{
    // Create a sine-wave generator tuned to 440 Hz (concert A).
    mGen = audio::master()->makeNode( new audio::GenSineNode() );
    mGen->setFreq( 440 );

    mGen->connect( mGain );
    mGen->enable();
}
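Both snippets above assume mGen and mGain are members of DeviceTestApp, with mGain connected onward to the Context's output elsewhere in the app. A minimal sketch of the declarations they rely on (the exact types in the real test app may differ):

    audio::GenNodeRef  mGen;   // base-class ref; can hold either the GenNoiseNode or the GenSineNode
    audio::GainNodeRef mGain;  // assumed to be connected to audio::master()->getOutput() during setup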
Example #3
void SpectralTestApp::setup()
{
	mSpectroMargin = 40.0f;

	auto ctx = audio::master();

	// The MonitorSpectralNode taps the signal and performs an FFT on it, using the window and FFT
	// sizes specified in its Format. setAutoEnabled() marks it to be enabled automatically when connected.
	auto format = audio::MonitorSpectralNode::Format().fftSize( FFT_SIZE ).windowSize( WINDOW_SIZE ).windowType( WINDOW_TYPE );
	mMonitorSpectralNode = ctx->makeNode( new audio::MonitorSpectralNode( format ) );
	mMonitorSpectralNode->setAutoEnabled();

	//mGen = ctx->makeNode( new audio::GenSineNode() );
	mGen = ctx->makeNode( new audio::GenTriangleNode() );
	mGen->setFreq( 440.0f );

	mSourceFile = audio::load( loadResource( RES_TONE440L220R_MP3 ), ctx->getSampleRate() );

	auto audioBuffer = mSourceFile->loadBuffer();
	CI_LOG_V( "loaded source buffer, frames: " << audioBuffer->getNumFrames() );

	mPlayerNode = ctx->makeNode( new audio::BufferPlayerNode( audioBuffer ) );

	setupSine();

	setupUI();

	ctx->enable();
	mEnableGraphButton.setEnabled( true );

	mScaleDecibelsButton.setEnabled( mSpectrumPlot.getScaleDecibels() );

	PRINT_GRAPH( ctx );

	CI_LOG_V( "MonitorSpectralNode fftSize: " << mMonitorSpectralNode->getFftSize() << ", windowSize: " << mMonitorSpectralNode->getWindowSize() );
}
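The spectrum captured by mMonitorSpectralNode is read back on the UI thread, typically once per frame. A minimal sketch of that readout (SpectrumPlot comes from the samples' shared drawing utilities; the exact call shown here is an assumption):

void SpectralTestApp::draw()
{
	gl::clear();

	// getMagSpectrum() returns the magnitude spectrum of the most recently analyzed window,
	// with fftSize / 2 bins spanning 0 Hz up to the Nyquist frequency.
	const std::vector<float> &magSpectrum = mMonitorSpectralNode->getMagSpectrum();
	mSpectrumPlot.draw( magSpectrum );
}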
Example #4
void ParamTestApp::setup()
{
	auto ctx = audio::master();
	mGain = ctx->makeNode( new audio::GainNode() );
	mGain->setValue( 0.8f );

	mPan = ctx->makeNode( new audio::Pan2dNode() );

	mGen = ctx->makeNode( new audio::GenSineNode() );
//	mGen = ctx->makeNode( new audio::GenTriangleNode() );
//	mGen = ctx->makeNode( new audio::GenPhasorNode() );
	mGen = ctx->makeNode( new audio::GenPulseNode );

	mGen->setFreq( 220 );

	mLowPass = ctx->makeNode( new audio::FilterLowPassNode() );

	setupBasic();

	setupUI();

	PRINT_GRAPH( ctx );

	testApply();
//	testApply2();
//	connectProcessor();
}
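testApply() and the commented-out variants are defined elsewhere in ParamTestApp. A minimal sketch of what such a Param ramp test could look like (the specific values and ramp length are assumptions):

void ParamTestApp::testApply()
{
	// Glide the generator's frequency Param from 220 Hz up to 440 Hz over one second.
	mGen->getParamFreq()->applyRamp( 220, 440, 1.0f );
}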
Example #5
void WebAudioApp::setup()
{
  Context::setMaster( new ContextWebAudio, new DeviceManagerWebAudio );

  auto ctx = audio::master();
  mGen = ctx->makeNode( new audio::GenSineNode );
  mGain = ctx->makeNode( new audio::GainNode );

  mGen->setFreq( 220 );
  mGain->setValue( 0.5f );

  // connections can be made this way or with connect(). The master Context's getOutput() is the speakers by default.
  mGen >> mGain >> ctx->getOutput();
  mGen->enable();
  ctx->enable();
}
Example #6
void NodeAdvancedApp::update()
{
	// Every seqPeriod frames (a random multiple of 10), pick a new pitch from the scale.
	size_t seqPeriod = 10 * randInt( 1, 4 );

	if( getElapsedFrames() % seqPeriod == 0 ) {
		size_t index = randInt( mCPentatonicScale.size() );
		size_t midiPitch = mCPentatonicScale.at( index );
		mGen->getParamFreq()->applyRamp( audio::midiToFreq( midiPitch ), mFreqRampTime );
	}
}
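This update() relies on a few NodeAdvancedApp members set up beforehand; a rough sketch of what they hold (the types and values here are illustrative assumptions):

	audio::GenNodeRef	mGen;				// oscillator whose frequency Param gets ramped
	std::vector<size_t>	mCPentatonicScale;	// MIDI note numbers of a C pentatonic scale, e.g. { 48, 50, 52, 55, 57, 60 }
	float				mFreqRampTime = 0.05f;	// ramp duration in seconds passed to applyRamp()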
Example #7
void NodeBasic::setup()
{
	// You use the audio::Context to make new audio::Node instances (audio::master() is the speaker-facing Context).
	auto ctx = audio::master();
	mGen = ctx->makeNode( new audio::GenSineNode );
	mGain = ctx->makeNode( new audio::GainNode );

	mGen->setFreq( 220 );
	mGain->setValue( 0.5f );

	// connections can be made this way or with connect(). The master Context's getOutput() is the speakers by default.
	mGen >> mGain >> ctx->getOutput();

	// Nodes need to be enabled to process audio. EffectNodes are enabled by default, while source Nodes (like Gen) need to be switched on.
	mGen->enable();

	// The Context must also be started; enabling and disabling it starts and stops the entire DSP graph.
	ctx->enable();
}
Example #8
void DeviceTestApp::setupSendStereo()
{
    auto ctx = audio::master();
    ctx->disconnectAllNodes();

    auto router = ctx->makeNode( new audio::ChannelRouterNode( audio::Node::Format().channels( mOutputDeviceNode->getNumChannels() ) ) );
    // A generic Node forced to two channels, used to upmix the mono generator to stereo before routing.
    auto upmix = ctx->makeNode( new audio::Node( audio::Node::Format().channels( 2 ) ) );

    int channelIndex = mSendChannelInput.getValue();
    CI_LOG_V( "routing input to channel: " << channelIndex );

    mGen >> upmix >> mGain >> router->route( 0, channelIndex );
    router >> mMonitor >> ctx->getOutput();

    mGen->enable();
}
Example #9
void NodeSubclassingApp::setup()
{
	auto ctx = audio::master();

	// We create an audio graph that demonstrates the CustomTremoloNode being used in combination with cinder-defined Nodes.
	mGenNode = ctx->makeNode( new audio::GenOscNode( audio::WaveformType::SQUARE, 220 ) );
	mMonitorNode = ctx->makeNode( new audio::MonitorNode );
	auto gain = ctx->makeNode( new audio::GainNode( 0.2f ) );

	// Here we make our custom Node. For the sake of demonstration, its channel count is forced to stereo.
	mCustomTremeloNode = ctx->makeNode( new CustomTremoloNode( audio::Node::Format().channels( 2 ) ) );

	mGenNode >> mCustomTremeloNode >> mMonitorNode >> gain >> ctx->getOutput();

	// Turn on the GenNode and fire up the Context.
	mGenNode->enable();
	ctx->enable();

	console() << "\naudio graph:\n" << ctx->printGraphToString() << endl;
}
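The CustomTremoloNode class itself is not shown in this example. A minimal sketch of what such a Node subclass can look like, following the pattern of overriding process() (the rate, depth, and modulation math here are assumptions, not the sample's actual implementation):

class CustomTremoloNode : public audio::Node {
  public:
	CustomTremoloNode( const Format &format = Format() ) : Node( format ) {}

  protected:
	// process() runs on the audio thread; the Buffer is modified in place.
	void process( audio::Buffer *buffer ) override
	{
		const float twoPi = 6.2831853f;
		const float rate = 4;      // tremolo rate in Hz (assumed)
		const float depth = 0.5f;  // modulation depth, 0 - 1 (assumed)
		const float sampleRate = (float)getSampleRate();
		const size_t numFrames = buffer->getNumFrames();

		for( size_t ch = 0; ch < buffer->getNumChannels(); ch++ ) {
			float *channel = buffer->getChannel( ch );
			for( size_t i = 0; i < numFrames; i++ ) {
				float mod = 1.0f - depth * ( 0.5f + 0.5f * sinf( twoPi * rate * ( mPhaseFrames + i ) / sampleRate ) );
				channel[i] *= mod;
			}
		}
		// Keep a running frame count as the LFO phase, wrapped once per second of audio.
		mPhaseFrames = fmodf( mPhaseFrames + numFrames, sampleRate );
	}

	float mPhaseFrames = 0;
};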
Example #10
void NodeAdvancedApp::draw()
{
	gl::clear();

	// Draw the MonitorNode's recorded Buffer in the upper right.
	if( mMonitor && mMonitor->getNumConnectedInputs() ) {
		Rectf scopeRect( getWindowWidth() - 210, 10, getWindowWidth() - 10, 110 );
		drawAudioBuffer( mMonitor->getBuffer(), scopeRect, true );
	}

	// Visualize the Gen's current pitch with a circle.

	float pitchMin = mCPentatonicScale.front();
	float pitchMax = mCPentatonicScale.back();
	float currentPitch = audio::freqToMidi( mGen->getFreq() ); // MIDI values do not have to be integers for us.

	float percent = ( currentPitch - pitchMin ) / ( pitchMax - pitchMin );

	float circleX = percent * getWindowWidth();

	gl::color( 0, 0.8f, 0.8f );
	gl::drawSolidCircle( vec2( circleX, getWindowCenter().y ), 50 );
}
Example #11
void NodeBasic::mouseDrag( MouseEvent event )
{
	// Map the mouse x position to frequency in Hz, and y to gain (top of the window = full volume).
	mGen->setFreq( event.getPos().x );
	mGain->setValue( 1.0f - (float)event.getPos().y / (float)getWindowHeight() );
}
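setFreq() and setValue() above change their targets immediately, which can cause audible stair-stepping while dragging. A variation using the Param ramps from Examples #4 and #6 (a sketch; the 50 ms ramp time is arbitrary, and it assumes the GainNode's value Param is reachable via getParam()):

void NodeBasic::mouseDrag( MouseEvent event )
{
	// Short ramps instead of instantaneous jumps smooth out the parameter changes.
	mGen->getParamFreq()->applyRamp( event.getPos().x, 0.05f );
	mGain->getParam()->applyRamp( 1.0f - (float)event.getPos().y / (float)getWindowHeight(), 0.05f );
}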