void SpectralTestApp::setup()
{
	mSpectroMargin = 40.0f;

	auto ctx = audio::master();

	auto format = audio::MonitorSpectralNode::Format().fftSize( FFT_SIZE ).windowSize( WINDOW_SIZE ).windowType( WINDOW_TYPE );
	mMonitorSpectralNode = ctx->makeNode( new audio::MonitorSpectralNode( format ) );
	mMonitorSpectralNode->setAutoEnabled();

	//mGen = ctx->makeNode( new audio::GenSineNode() );
	mGen = ctx->makeNode( new audio::GenTriangleNode() );
	mGen->setFreq( 440.0f );

	mSourceFile = audio::load( loadResource( RES_TONE440L220R_MP3 ), ctx->getSampleRate() );

	auto audioBuffer = mSourceFile->loadBuffer();
	CI_LOG_V( "loaded source buffer, frames: " << audioBuffer->getNumFrames() );

	mPlayerNode = ctx->makeNode( new audio::BufferPlayerNode( audioBuffer ) );

	setupSine();
	setupUI();

	ctx->enable();
	mEnableGraphButton.setEnabled( true );
	mScaleDecibelsButton.setEnabled( mSpectrumPlot.getScaleDecibels() );

	PRINT_GRAPH( ctx );

	CI_LOG_V( "MonitorSpectralNode fftSize: " << mMonitorSpectralNode->getFftSize() << ", windowSize: " << mMonitorSpectralNode->getWindowSize() );
}
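// A minimal sketch of reading back the captured spectrum, assuming the members set up above. getMagSpectrum()
// and getFreqForBin() are MonitorSpectralNode methods; the function name and the SpectrumPlot::draw() call are
// illustrative assumptions, not taken from the test app itself.
void SpectralTestApp::drawSpectrumSketch() // hypothetical helper, e.g. called from draw()
{
	// One magnitude value per FFT bin, computed from the most recently recorded window.
	const std::vector<float> &magSpectrum = mMonitorSpectralNode->getMagSpectrum();
	mSpectrumPlot.draw( magSpectrum );

	// Bins map to frequencies in hertz, e.g. to report the strongest bin:
	auto maxIt = std::max_element( magSpectrum.begin(), magSpectrum.end() );
	size_t maxBin = std::distance( magSpectrum.begin(), maxIt );
	CI_LOG_V( "strongest bin: " << maxBin << " ~ " << mMonitorSpectralNode->getFreqForBin( maxBin ) << " Hz" );
}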
void ParamTestApp::setup()
{
	auto ctx = audio::master();

	mGain = ctx->makeNode( new audio::GainNode() );
	mGain->setValue( 0.8f );

	mPan = ctx->makeNode( new audio::Pan2dNode() );

	// Only one GenNode variant should be assigned; the alternatives are left commented out.
	//mGen = ctx->makeNode( new audio::GenSineNode() );
	//mGen = ctx->makeNode( new audio::GenTriangleNode() );
	//mGen = ctx->makeNode( new audio::GenPhasorNode() );
	mGen = ctx->makeNode( new audio::GenPulseNode() );
	mGen->setFreq( 220 );

	mLowPass = ctx->makeNode( new audio::FilterLowPassNode() );

	setupBasic();
	setupUI();

	PRINT_GRAPH( ctx );

	testApply();
	//testApply2();
	//connectProcessor();
}
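// A hedged sketch of the kind of parameter ramping that testApply() presumably exercises (its body is not
// shown above). applyRamp() schedules a click-free value change that is interpolated on the audio thread;
// the function name and the specific values/durations below are illustrative assumptions.
void ParamTestApp::rampSketch() // hypothetical helper, not part of the original test
{
	// Ramp the oscillator frequency from 220 Hz to 440 Hz over one second.
	mGen->getParamFreq()->applyRamp( 220, 440, 1.0f );

	// Other node parameters work the same way, e.g. fading the gain to 0.2 over two seconds.
	mGain->getParam()->applyRamp( 0.2f, 2.0f );
}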
void DeviceTestApp::setupSine()
{
	mGen = audio::master()->makeNode( new audio::GenSineNode() );
	mGen->setFreq( 440 );

	mGen->connect( mGain );
	mGen->enable();
}
void WebAudioApp::setup()
{
	Context::setMaster( new ContextWebAudio, new DeviceManagerWebAudio );

	auto ctx = audio::master();
	mGen = ctx->makeNode( new audio::GenSineNode );
	mGain = ctx->makeNode( new audio::GainNode );

	mGen->setFreq( 220 );
	mGain->setValue( 0.5f );

	// Connections can be made this way or with connect(). The master Context's getOutput() is the speakers by default.
	mGen >> mGain >> ctx->getOutput();

	mGen->enable();
	ctx->enable();
}
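// The operator>> chain above is shorthand for explicit connect() calls; the same graph, using the members
// from WebAudioApp::setup(), could also be built as:
//     mGen->connect( mGain );
//     mGain->connect( ctx->getOutput() );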
void NodeBasic::setup()
{
	// You use the audio::Context to make new audio::Node instances (audio::master() is the speaker-facing Context).
	auto ctx = audio::master();
	mGen = ctx->makeNode( new audio::GenSineNode );
	mGain = ctx->makeNode( new audio::GainNode );

	mGen->setFreq( 220 );
	mGain->setValue( 0.5f );

	// Connections can be made this way or with connect(). The master Context's getOutput() is the speakers by default.
	mGen >> mGain >> ctx->getOutput();

	// Nodes need to be enabled to process audio. Effect nodes are enabled by default, while source nodes (like Gen) need to be switched on.
	mGen->enable();

	// The Context must also be started. Enabling and disabling it controls the entire DSP graph.
	ctx->enable();
}
void NodeBasic::mouseDrag( MouseEvent event )
{
	mGen->setFreq( event.getPos().x );
	mGain->setValue( 1.0f - (float)event.getPos().y / (float)getWindowHeight() );
}
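// A hedged variation on mouseDrag() above: instead of setting values instantaneously (which can produce
// zipper noise), the same parameters can be ramped over a short interval with audio::Param::applyRamp().
// The handler name and the 50 ms ramp time are illustrative choices, not part of the original sample.
void NodeBasic::mouseDragSmoothed( MouseEvent event ) // hypothetical alternative handler
{
	mGen->getParamFreq()->applyRamp( event.getPos().x, 0.05f );
	mGain->getParam()->applyRamp( 1.0f - (float)event.getPos().y / (float)getWindowHeight(), 0.05f );
}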