void DeviceTestApp::setupInputPulled()
{
	// Detach whatever was previously feeding the output device, then route
	// the microphone through the gain stage into the monitor node.
	mOutputDeviceNode->disconnectAllInputs();

	mInputDeviceNode->connect( mGain );
	mGain->connect( mMonitor );

	// InputNode subclasses must be explicitly enabled before they produce audio.
	mInputDeviceNode->enable();
}
void DeviceTestApp::setupIOAndSine()
{
	// Build a 440 Hz sine generator and mix it into the gain stage
	// alongside the live input device.
	mGen = audio::master()->makeNode( new audio::GenSineNode() );
	mGen->setFreq( 440 );

	mGen >> mGain;
	mGen->enable();

	mInputDeviceNode >> mGain;
	mInputDeviceNode->enable();
}
void ciApp::setup() { setWindowSize(1280, 720); setFrameRate(60.f); int maxVertUniformsVect; glGetIntegerv(GL_MAX_VERTEX_UNIFORM_VECTORS, &maxVertUniformsVect); mSize = 0; mSizePrev = -1; mSizeMax = 17; mAmplifier = 1.f; mExposure = 1.f; mGamma = 2.2f; printf("max uniform: %i, %i\n", maxVertUniformsVect, mSizeMax); mParams = params::InterfaceGl::create(getWindow(), "App parameters", ivec2(250, 300)); mParams->setPosition(ivec2(20, 250)); mTexture = gl::Texture::create(loadImage(loadFile(data_path + "demo.png"))); mFbo = gl::Fbo::create(mTexture->getWidth(), mTexture->getHeight(), gl::Fbo::Format().colorTexture()); //mShader.setup("filterGaussianBlur"); filter = hb::GlslFilter::create(mTexture->getSize()); filter->setParams(mParams); vector_blur.setup(getWindowSize()); vector_blur.setParams(mParams); spout_receiver = hb::Receiver::create("Spout DX11 Sender"); //spout_receiver = hbSpoutReceiver::create("KidsLandSea"); spout_sender = hb::Sender::create("cinder_spout", mFbo->getWidth(), mFbo->getHeight()); #if 0 auto ctx = audio::Context::master(); // The InputDeviceNode is platform-specific, so you create it using a special method on the Context: mInputDeviceNode = ctx->createInputDeviceNode(); // By providing an FFT size double that of the window size, we 'zero-pad' the analysis data, which gives // an increase in resolution of the resulting spectrum data. auto monitorFormat = audio::MonitorSpectralNode::Format().fftSize(2048).windowSize(1024); mMonitorSpectralNode = ctx->makeNode(new audio::MonitorSpectralNode(monitorFormat)); mInputDeviceNode >> mMonitorSpectralNode; // InputDeviceNode (and all InputNode subclasses) need to be enabled()'s to process audio. So does the Context: mInputDeviceNode->enable(); ctx->enable(); #endif }
void DeviceTestApp::setupIOProcessed()
{
	auto ctx = audio::master();

	// A 200 Hz sine that will modulate the ring-mod gain's parameter.
	auto modulator = ctx->makeNode( new audio::GenSineNode( audio::Node::Format().autoEnable() ) );
	modulator->setFreq( 200 );

	// GainNode whose gain parameter is driven by the modulator — i.e. ring modulation.
	auto ringMod = ctx->makeNode( new audio::GainNode );
	ringMod->setName( "RingModGain" );
	ringMod->getParam()->setProcessor( modulator );

	// Replace whatever previously fed mGain with the mic → ring-mod chain.
	mGain->disconnectAllInputs();
	mInputDeviceNode >> ringMod >> mGain;

	mInputDeviceNode->enable();
}
void _TBOX_PREFIX_App::setup() { auto ctx = audio::Context::master(); vector<audio::DeviceRef> devices = audio::Device::getInputDevices(); console() << "List audio devices:" << endl; for( auto k=0; k < devices.size(); k++ ) console() << devices[k]->getName() << endl; // find and initialise a device by name // audio::DeviceRef dev = audio::Device::findDeviceByName( "Soundflower (2ch)" ); // mInputDeviceNode = ctx->createInputDeviceNode( dev ); initialise default input device mInputDeviceNode = ctx->createInputDeviceNode(); // initialise MonitorNode to get the PCM data auto monitorFormat = audio::MonitorNode::Format().windowSize( CIXTRACT_PCM_SIZE ); mMonitorNode = ctx->makeNode( new audio::MonitorNode( monitorFormat ) ); // pipe the input device into the MonitorNode mInputDeviceNode >> mMonitorNode; // InputDeviceNode (and all InputNode subclasses) need to be enabled()'s to process audio. So does the Context: mInputDeviceNode->enable(); ctx->enable(); // Initialise ciXtract mXtract = ciXtract::create(); mFeatures = mXtract->getFeatures(); // Features are disabled by default, call enableFeature() to enable each feature and its dependencies // You may notice a couple of "FEATURE NOT FOUND!" messages in the console, some LibXtract features are not supported yet. for( auto k=0; k < XTRACT_FEATURES; k++ ) mXtract->enableFeature( (xtract_features_)k ); }
void DeviceTestApp::setupIOClean()
{
	// Minimal path: microphone straight into the gain stage, nothing else.
	mInputDeviceNode >> mGain;
	mInputDeviceNode->enable();
}