Example #1
void FxApp::setup()
{
	gl::disableVerticalSync();

	int w = 640;
	int h = 480;
	mEffects.push_back( fx::EffectRef( new fx::PassThrough( w, h ) ) );
	mEffects.push_back( fx::EffectRef( new fx::LumaOffset( w, h ) ) );
	mEffects.push_back( fx::EffectRef( new fx::Posterize( w, h ) ) );

	mParams = params::InterfaceGl( "Parameters", Vec2i( 300, 300 ) );

	for ( vector< fx::EffectRef >::iterator it = mEffects.begin(); it != mEffects.end(); ++it )
	{
		mEffectNames.push_back( (*it)->getName() );
	}

	mCurrentEffect = 0;

	// capture
	try
	{
		mCapture = Capture( 640, 480 );
		mCapture.start();
	}
	catch (...)
	{
		console() << "Failed to initialize capture" << std::endl;
	}
}
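A hedged sketch of how this setup is typically completed: the collected names can drive an enum parameter (Cinder's InterfaceGl has a vector-of-strings overload of addParam), and the selected effect runs per frame. Effect::process() and the mFrame member are assumptions — the actual mndl::fx interface may differ.

// could be registered at the end of setup():
//   mParams.addParam( "Effect", mEffectNames, &mCurrentEffect );

void FxApp::update()
{
	if ( mCapture && mCapture.checkNewFrame() )
		mFrame = gl::Texture( mCapture.getSurface() );	// mFrame: assumed gl::Texture member
}

void FxApp::draw()
{
	gl::clear();
	if ( mFrame )
		gl::draw( mEffects[ mCurrentEffect ]->process( mFrame ) );	// process(): hypothetical API
	params::InterfaceGl::draw();
}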
Example #2
void ___PACKAGENAMEASIDENTIFIER___App::setup()
{
	try {
		mCapture = Capture( kCaptureWidth, kCaptureHeight );
		mCapture.start();
	} catch ( ... ) {
		console() << "Error with capture device." << std::endl;
		exit(1);
	}

	try {
		mShader = gl::GlslProg( loadResource( RES_SHADER_PASSTHRU ), loadResource( RES_SHADER_FRAGMENT ) );
	} catch ( gl::GlslProgCompileExc &exc ) {
		console() << "Cannot compile shader: " << exc.what() << std::endl;
		exit(1);
	} catch ( Exception &exc ) {
		console() << "Cannot load shader: " << exc.what() << std::endl;
		exit(1);
	}
	
	mFbo = gl::Fbo( kWindowWidth, kWindowHeight );

	mMixColorRed = 0.0f;
	mMixColorGreen = 0.0f;
	mMixColorBlue = 0.0f;

	mParams = params::InterfaceGl( "Parameters", Vec2i( kParamsWidth, kParamsHeight ) );
	mParams.addParam( "Mix Red", &mMixColorRed, "min=-1.0 max=1.0 step=0.01 keyIncr=r keyDecr=R" );
	mParams.addParam( "Mix Green", &mMixColorGreen, "min=-1.0 max=1.0 step=0.01 keyIncr=g keyDecr=G" );
	mParams.addParam( "Mix Blue", &mMixColorBlue, "min=-1.0 max=1.0 step=0.01 keyIncr=b keyDecr=B" );


}
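The mix-color parameters only matter once they reach the fragment shader; a minimal hedged draw() sketch, assuming the legacy GlslProg bind/uniform API. The uniform names "tex" and "mixColor" and the mTexture member are illustrative, not taken from the original source.

void ___PACKAGENAMEASIDENTIFIER___App::draw()
{
	gl::clear();
	if ( !mTexture )
		return;
	mTexture.bind( 0 );
	mShader.bind();
	mShader.uniform( "tex", 0 );	// illustrative uniform name
	mShader.uniform( "mixColor", Vec3f( mMixColorRed, mMixColorGreen, mMixColorBlue ) );	// illustrative
	gl::drawSolidRect( getWindowBounds() );
	mShader.unbind();
	mTexture.unbind();
	params::InterfaceGl::draw();
}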
Example #3
void RotatingCubeApp::setup()
{
	try {
		mCapture = Capture( 320, 240 );
		mCapture.start();
	}
	catch( CaptureExc &exc ) {
		console() << "failed to initialize the webcam, what: " << exc.what() << std::endl;

		// if start() threw, reset the Capture to null and
		// show a warning texture instead
		mCapture.reset();
		
		TextLayout layout;
		layout.clear( Color( 0.3f, 0.3f, 0.3f ) );
		layout.setColor( Color( 1, 1, 1 ) );
		layout.setFont( Font( "Arial", 96 ) );
		layout.addCenteredLine( "No Webcam" );
		layout.addCenteredLine( "Detected" );
		mTexture = gl::Texture2d::create( layout.render() );
	}
	
	mCam.lookAt( vec3( 3, 2, -3 ), vec3( 0 ) );
	gl::enableDepthRead();
	gl::enableDepthWrite();
}
Example #4
void SmilesApp::setup()
{	
    mSmileLimit = 4.0f;
    mSmileAverageNumOfFrames = 10;
    mCamIndex = 0;
    mFps = getAverageFps();
    
    try {
		mCapture = Capture( CAMWIDTH, CAMHEIGHT );
		mCapture.start();
	}
	catch( ... ) {
		console() << "Failed to initialize capture" << std::endl;
	}
    
    mSmileRect = Rectf(300,100,600,400);
    setupSmileDetector( mSmileRect.getInteriorArea().getWidth(), mSmileRect.getInteriorArea().getHeight() );
    console() << mSmileRect.getInteriorArea().getWidth() << " x " << mSmileRect.getInteriorArea().getHeight() << endl;
	mSmileThreshold = 0;
	mSmileAverageIndex = 0;
    
    mParams = params::InterfaceGl( "Parameters", Vec2i( 220, 170 ) );
    mParams.addParam( "FPS", &mFps,"", true );
    mParams.addSeparator();
	mParams.addParam( "SmileResponse", &mSmileResponse, "", true );
    mParams.addParam( "SmileThreshold", &mSmileThreshold, "", true );
    
    mParams.addParam( "mSmileLimit", &mSmileLimit );
    mParams.addParam( "mSmileAverageNumOfFrames", &mSmileAverageNumOfFrames );
    
}
Example #5
void ocvFaceDetectApp::setup()
{
	mFaceCascade.load( getAssetPath( "haarcascade_frontalface_alt.xml" ).string() );
	mEyeCascade.load( getAssetPath( "haarcascade_eye.xml" ).string() );	
	
	mCapture = Capture( 640, 480 );
	mCapture.start();
}
Example #6
void CaptureApp::keyDown( KeyEvent event )
{
#if defined( CINDER_MAC )
	if( event.getChar() == 'f' )
		setFullScreen( ! isFullScreen() );
	else if( event.getChar() == ' ' )
		( mCapture && mCapture.isCapturing() ) ? mCapture.stop() : mCapture.start();
#endif
}
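The spacebar toggle above only shows an effect if frames are being pulled continuously; the usual companion update(), as in Cinder's CaptureBasic sample (mTexture is an assumed gl::Texture member):

void CaptureApp::update()
{
	if ( mCapture && mCapture.checkNewFrame() )
		mTexture = gl::Texture( mCapture.getSurface() );
}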
Example #7
void CaptureApp::setup()
{	
	try {
		mCapture = Capture( 640, 480 );
		mCapture.start();
	}
	catch( ... ) {
		console() << "Failed to initialize capture" << std::endl;
	}
}
Example #8
void TellThatToMyCamera_v1_0App::setup()
{
    mExpressionsCascade.load(getAssetPath("haarcascade_frontalface_alt.xml").string());
    mPath = getAssetPath("ppdtest.csv").string();
    
	mCapture = Capture( 640, 480 );                 // Camera settings
	mCapture.start();
    
    read_csv(mPath, mDBimgFaces, mDBLabels);        // Read DB of faces for FaceRec algorithm
    mFisherFaceRec->train(mDBimgFaces, mDBLabels);  // Train the Fisher Face Recognizer algorithm
}
Example #9
void peopleDetectApp::setup()
{
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    
    capture = Capture(640, 480);
    capture.start();
    
    stillSurface = loadImage(loadAsset( "people_in_park.jpg" ) );
	stillTex = gl::Texture(stillSurface);
    
}
Example #10
void ocvFaceDetectApp::setup()
{
#if defined( CINDER_MAC )
	mFaceCascade.load( getResourcePath( "haarcascade_frontalface_alt.xml" ) );
	mEyeCascade.load( getResourcePath( "haarcascade_eye.xml" ) );	
#else
	mFaceCascade.load( getAppPath() + "../../resources/haarcascade_frontalface_alt.xml" );
	mEyeCascade.load( getAppPath() + "../../resources/haarcascade_eye.xml" );	
#endif
	
	mCapture = Capture( 640, 480 );
	mCapture.start();
}
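Both ocvFaceDetect variants pair this setup with a per-frame detection pass; a condensed hedged sketch, assuming Cinder's OpenCV bridge (toOcv) — the scale factor, neighbor count, and minimum size are illustrative:

void ocvFaceDetectApp::update()
{
	if ( !mCapture.checkNewFrame() )
		return;
	// convert the frame to grayscale for the cascade
	cv::Mat gray( toOcv( Channel8u( mCapture.getSurface() ) ) );
	std::vector<cv::Rect> faces;
	mFaceCascade.detectMultiScale( gray, faces, 1.2, 3, 0, cv::Size( 30, 30 ) );
	// eye detection would run on each detected face rectangle here
}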
Example #11
void PhotoBoothApp::setup()
{
    // Start camera.
    try {
        
        vector<Capture::DeviceRef> devices( Capture::getDevices() );
        
        // Look for a camera called "Front Camera"
        for( vector<Capture::DeviceRef>::const_iterator deviceIt = devices.begin(); deviceIt != devices.end(); ++deviceIt ) {
            Capture::DeviceRef device = *deviceIt;
            
            if(device->getName() == "Front Camera"){
                mCapture = Capture( CAM_WIDTH, CAM_HEIGHT, device );
                mCapture.start();
            }
        }
	}
	catch( ... ) {
		console() << "Failed to initialize camera" << std::endl;
	}
    
    // Load textures
    mConfirmMessage         = loadImage( loadResource("assets/confirm_message.png"));
    mSaveTexture            = loadImage( loadResource("assets/save.png"));
    mDiscardTexture         = loadImage( loadResource("assets/discard.png"));
    mCameraButtonTexture    = loadImage( loadResource("assets/camera.png"));
    mIntroTexture           = loadImage( loadResource("assets/attract.png" ));
    mLightBg                = loadImage( loadResource("assets/bkg_light.png" ));
    mDarkBg                 = loadImage( loadResource("assets/bkg_dark.png" ));
    mNumberBg               = loadImage( loadResource("assets/countdown_bkg.png") );
    mNumberProgress         = loadImage( loadResource("assets/countdown_progress.png") );
    
    mNumberTextures.push_back( loadImage( loadResource("assets/countdown_5.png")));
    mNumberTextures.push_back( loadImage( loadResource("assets/countdown_4.png")));
    mNumberTextures.push_back( loadImage( loadResource("assets/countdown_3.png")));
    mNumberTextures.push_back( loadImage( loadResource("assets/countdown_2.png")));
    mNumberTextures.push_back( loadImage( loadResource("assets/countdown_1.png")));

    width               = getWindowWidth() / DISPLAY_SCALE;
	height              = getWindowHeight() / DISPLAY_SCALE;
    
    mCurrentState       = STATE_PREVIEW;
    
    mDiscardPos         = Vec2f(100, height + 100 );
    mSavePos            = Vec2f(width - 700, height + 100);
    mCameraButtonPos    = Vec2f(width/2 - mCameraButtonTexture.getWidth() / 2, 650 - mCameraButtonTexture.getHeight() / 2);
}
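Note that if no device named "Front Camera" is found, mCapture is left uninitialized. A hedged fallback, placed inside the try block after the loop, could default to the first available device:

        // fallback: no "Front Camera" found, use the first device (assumption, not in the original)
        if( !mCapture && !devices.empty() ) {
            mCapture = Capture( CAM_WIDTH, CAM_HEIGHT, devices.front() );
            mCapture.start();
        }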
Example #12
void ScreenShadersApp::setup()
{
	try {
		vector<Capture::DeviceRef> devices( Capture::getDevices() );
		mCapture = Capture( CAPTURE_WIDTH, CAPTURE_HEIGHT, devices[0] );
		mCapture.start();
	}
	catch( ... ) {
		console() << "Failed to initialize capture" << std::endl;
	}
	
	gl::Fbo::Format format;
	mFbo = gl::Fbo( CAPTURE_WIDTH, CAPTURE_HEIGHT, format );
	
	mBlurShader = gl::GlslProg( loadResource( "passThruVert.glsl" ), loadResource( "blurFrag.glsl" ) );
	mBlurKernel = gl::Texture( loadImage( loadResource( "kernel.png" ) ) );
}
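A hedged sketch of the draw pass this setup implies: render the capture texture into the FBO, then draw the FBO's texture through the blur shader. The uniform names and the mCaptureTex member are illustrative, not from the original source.

void ScreenShadersApp::draw()
{
	gl::clear();
	if ( !mCaptureTex )	// mCaptureTex: assumed gl::Texture member updated from mCapture
		return;
	mFbo.bindFramebuffer();
	gl::draw( mCaptureTex, mFbo.getBounds() );
	mFbo.unbindFramebuffer();

	mBlurKernel.bind( 1 );
	mBlurShader.bind();
	mBlurShader.uniform( "tex", 0 );	// illustrative uniform names
	mBlurShader.uniform( "kernel", 1 );
	gl::draw( mFbo.getTexture(), getWindowBounds() );
	mBlurShader.unbind();
}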
Example #13
void PaintingBeingsApp::setup()
{	
	setStop();

	// Set up OpenGL parameters
	glShadeModel(GL_SMOOTH);
	gl::enable(GL_POLYGON_SMOOTH);
	glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST);
	gl::enableAlphaBlending();
	gl::enableDepthRead();
	gl::enableDepthWrite();

	// Setup Capture webcam
	_capture = Capture(640, 480);
	_capture.start();

	// Setup camera
	float fieldOfView = 75.0f;
	float nearCamera = 0.1f;
	float farCamera = 1000000.0f;
	_camera = CameraPersp(getWindowWidth(), getWindowHeight(), fieldOfView, nearCamera, farCamera);
	_camera.lookAt(Vec3f(0.0f, 0.0f, 50.0f), Vec3f::zero());

	// Setup arcball (camera rotation)
	float radiusArcBall = static_cast<float>(getWindowHeight() * 0.5f);
	_arcball = Arcball(getWindowSize());
	_arcball.setRadius(radiusArcBall);


	// Generate the ImageBeing population
	_imageBeing = ImageBeing();

	_rectangleAlgoGen.set(-75.0f, 25.0f, -25.0f, -25.0f);
	_rectangleTextutre.set(25.0f, 25.0f, 75.0f, -25.0f);


	// Set up the interface
	_params = params::InterfaceGl("Options", Vec2i(200, 400));
	updateInterface(false);
}
Example #14
void ICPApp::setup()
{
    mExpressionsCascade.load(getAssetPath("haarcascade_frontalface_alt.xml").string());
    mPath = getAssetPath("ppdtest.csv").string();
    
	mCapture = Capture( 640, 480 );                 // Camera settings
	mCapture.start();
    
    read_csv(mPath, mDBimgFaces, mDBLabels);        // Read DB of faces for FaceRec algorithm
    mFisherFaceRec->train(mDBimgFaces, mDBLabels);  // Train the Fisher Face Recognizer algorithm
    
    // FOR TESTING PURPOSES
    //    mSurf=(loadImage("/Users/PpD/Desktop/EcA - Pp DanY/MSc ICP/Semester 2/ICP 3/Faces DB Original/hugh_laurie_extra1.jpg"));
    //  mTexture = gl::Texture(mCinderDBimgFaces);
    //  mTexture = gl::Texture( fromOcv( input ) );
    //  cv::Mat output;
    //  mTexture = gl::Texture( fromOcv( loadImage("/Users/PpD/Desktop/emotionsrec2/data/emotions/0neutral/amy_adams_neutral.jpg") ) );    
    //  mDBLabelsTEST.push_back(0);
    //  mDBLabelsTEST.push_back(1);
    //  mFisherFaceRec->train(mDBimgFaces, mDBLabelsTEST);
    //  mFisherFaceRec->train(mDBimgFaces, mDBLabels);
}
Example #15
void LEDCamApp::setup()
{
	try {
		mCapture = Capture( kCaptureWidth, kCaptureHeight );
		mCapture.start();
	} catch ( ... ) {
		console() << "Error with capture device." << std::endl;
		exit(1);
	}

	try {
		mShader = gl::GlslProg( loadResource( RES_SHADER_PASSTHRU ), loadResource( RES_SHADER_FRAGMENT ) );
	} catch ( gl::GlslProgCompileExc &exc ) {
		console() << "Cannot compile shader: " << exc.what() << std::endl;
		exit(1);
	} catch ( Exception &exc ) {
		console() << "Cannot load shader: " << exc.what() << std::endl;
		exit(1);
	}
	
	mFbo = gl::Fbo( kWindowWidth, kWindowHeight );
}
Example #16
void motionHistAdvApp::setup()
{
	// CAPTURE
	mCaptureTex		= gl::Texture( APP_WIDTH, APP_HEIGHT );
	mCaptureSurface = Surface( APP_WIDTH, APP_HEIGHT, false );
    mCapture		= Capture( APP_WIDTH, APP_HEIGHT );
    mCapture.start();
    
	// OPENCV
	// NOTE: width and height must be reversed (rows, cols) for these Mats
    mHistory		= cv::Mat::zeros( APP_HEIGHT, APP_WIDTH, CV_32FC1 );
    mMotionMask		= cv::Mat::zeros( APP_HEIGHT, APP_WIDTH, CV_32FC1 );
    mOrientation	= cv::Mat::zeros( APP_HEIGHT, APP_WIDTH, CV_32FC1 );
    mMask			= cv::Mat::zeros( APP_HEIGHT, APP_WIDTH, CV_8UC1 );
    
	// TEXTURES
    mHistoryTex		= gl::Texture( APP_WIDTH, APP_HEIGHT, gl::Texture::Format() );
    mDiffTex		= gl::Texture( APP_WIDTH, APP_HEIGHT, gl::Texture::Format() );
    mOrientTex		= gl::Texture( APP_WIDTH, APP_HEIGHT, gl::Texture::Format() );
    mMotionMaskTex	= gl::Texture( APP_WIDTH, APP_HEIGHT, gl::Texture::Format() );
    
    mMagnitude		= 50;
}
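The zero-filled Mats above are the working buffers for OpenCV's motion-history pipeline; a hedged per-frame sketch, assuming cv::updateMotionHistory and cv::calcMotionGradient from OpenCV's video module. The thresholds, the one-second MHI duration, and the mPrevFrame member are illustrative.

void motionHistAdvApp::update()
{
	if ( !mCapture.checkNewFrame() )
		return;
	cv::Mat gray( toOcv( Channel8u( mCapture.getSurface() ) ) );
	if ( !mPrevFrame.empty() ) {	// mPrevFrame: assumed cv::Mat member
		cv::Mat silhouette;
		cv::absdiff( gray, mPrevFrame, silhouette );
		cv::threshold( silhouette, silhouette, 30, 1, cv::THRESH_BINARY );
		cv::updateMotionHistory( silhouette, mHistory, getElapsedSeconds(), 1.0 );
		cv::calcMotionGradient( mHistory, mMask, mOrientation, 0.25, 0.05, 3 );
	}
	mPrevFrame = gray.clone();
}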
Example #17
void shaderExternalFileExampleApp::setup(){
    // Initialize shader:
	try {
        mShader = ci::gl::GlslProg( loadAsset("shader_vert.glsl"),
                                   loadAsset("shader_frag.glsl") );
	}
    catch( gl::GlslProgCompileExc &exc ) {
		console() << "Cannot compile shader: " << exc.what() << std::endl;
		exit(1);
	}
    catch( Exception &exc ){
		console() << "Cannot load shader: " << exc.what() << std::endl;
		exit(1);
	}
    
    // Initialize camera:
    try {
		mCapture = Capture( CAM_W, CAM_H );
		mCapture.start();
	}
	catch(...) { console() << "Failed to initialize camera." << endl; }
}
Example #18
int main(int argc, char *argv[])
{
    try {
        // Output usage message
        std::string filename(argv[0]);
        filename = filename.substr(filename.find_last_of('\\') + 1);
        std::cout << "SampleMarkerlessDetector" << std::endl;
        std::cout << "========================" << std::endl;
        std::cout << std::endl;
        std::cout << "Description:" << std::endl;
        std::cout << "  This is an example of how to use the 'FernImageDetector' and" << std::endl;
        std::cout << "  'FernPoseEstimator' classes to detect and track an image and" << std::endl;
        std::cout << "  visualize it using 'GlutViewer'. The classification must first" << std::endl;
        std::cout << "  be trained with the SampleMarkerlessCreator sample and the" << std::endl;
        std::cout << "  resulting file passed as an argument to this sample." << std::endl;
        std::cout << std::endl;
        std::cout << "  For optimal results, a high quality USB camera or a Firewire" << std::endl;
        std::cout << "  camera is necessary. It is also advised to calibrate the camera" << std::endl;
        std::cout << "  using the SampleCamCalib sample. It should be noted that the size" << std::endl;
        std::cout << "  of the trained image will affect the optimal distance for detection." << std::endl;
        std::cout << std::endl;
        std::cout << "Usage:" << std::endl;
        std::cout << "  " << filename << " filename [device]" << std::endl;
        std::cout << std::endl;
        std::cout << "    filename the filename of classifier (.dat)" << std::endl;
        std::cout << "    device   integer selecting device from enumeration list (default 0)" << std::endl;
        std::cout << "             highgui capture devices are prefered" << std::endl;
        std::cout << std::endl;
        std::cout << "Keyboard Shortcuts:" << std::endl;
        std::cout << "  q: quit" << std::endl;
        std::cout << std::endl;

        if (argc < 2) {
            std::cout << "Filename not specified." << std::endl;
            return 0;
        }
        std::string classifierFilename(argv[1]);

        // Initialise GlutViewer and CvTestbed
        GlutViewer::Start(argc, argv, 640, 480);
        CvTestbed::Instance().SetVideoCallback(videocallback);

        // Enumerate possible capture plugins
        CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
        if (plugins.size() < 1) {
            std::cout << "Could not find any capture plugins." << std::endl;
            return 0;
        }

        // Display capture plugins
        std::cout << "Available Plugins: ";
        outputEnumeratedPlugins(plugins);
        std::cout << std::endl;

        // Enumerate possible capture devices
        CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
        if (devices.size() < 1) {
            std::cout << "Could not find any capture devices." << std::endl;
            return 0;
        }

        // Check command line argument for which device to use
        int selectedDevice = defaultDevice(devices);
        if (argc > 2) {
            selectedDevice = atoi(argv[2]);
        }
        if (selectedDevice >= (int)devices.size()) {
            selectedDevice = defaultDevice(devices);
        }
        
        // Display capture devices
        std::cout << "Enumerated Capture Devices:" << std::endl;
        outputEnumeratedDevices(devices, selectedDevice);
        std::cout << std::endl;
        
        // Create capture object from camera
        Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
        std::string uniqueName = devices[selectedDevice].uniqueName();

        // Handle capture lifecycle and start video capture
        // Note that loadSettings/saveSettings are not supported by all plugins
        if (cap) {
            std::cout << "Loading classifier." << std::endl;
            if (!fernDetector.read(classifierFilename)) {
                std::cout << "Loading classifier failed." << std::endl;
                cap->stop();
                delete cap;
                return 1;
            }

            std::stringstream settingsFilename;
            settingsFilename << "camera_settings_" << uniqueName << ".xml";
            
            cap->start();
            cap->setResolution(640, 480);
            
            if (cap->loadSettings(settingsFilename.str())) {
                std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
            }

            std::stringstream title;
            title << "SampleMarkerDetector (" << cap->captureDevice().captureType() << ")";

            CvTestbed::Instance().StartVideo(cap, title.str().c_str());

            if (cap->saveSettings(settingsFilename.str())) {
                std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
            }

            cap->stop();
            delete cap;
        }
        else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
        }
        else {
            std::cout << "Could not initialize the selected capture backend." << std::endl;
        }

        return 0;
    }
    catch (const std::exception &e) {
        std::cout << "Exception: " << e.what() << endl;
    }
    catch (...) {
        std::cout << "Exception: unknown" << std::endl;
    }
}
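Each ALVAR sample above registers a videocallback with CvTestbed before starting capture; a minimal hedged sketch of that callback's shape, using the IplImage-based signature the samples share (the actual detection and tracking work is omitted):

void videocallback(IplImage *image)
{
    static IplImage *gray = 0;
    if (!gray) {
        gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
    }
    if (image->nChannels > 1) {
        cvCvtColor(image, gray, CV_RGB2GRAY);
    } else {
        cvCopy(image, gray);
    }
    // per-frame work (FernImageDetector, MultiMarker, trackers, ...) runs on 'gray' here,
    // drawing any debug overlay back into 'image' before returning
}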
Example #19
int main(int argc, char *argv[])
{
    try {
        // Output usage message
        std::string filename(argv[0]);
        filename = filename.substr(filename.find_last_of('\\') + 1);
        std::cout << "SampleMultiMarker" << std::endl;
        std::cout << "=================" << std::endl;
        std::cout << std::endl;
        std::cout << "Description:" << std::endl;
        std::cout << "  This is an example of how to use the 'MultiMarker' class to detect" << std::endl;
        std::cout << "  preconfigured multi-marker setup (see ALVAR.pdf). A large cube is drawn" << std::endl;
        std::cout << "  over the detected multi-marker even when only some of the markers are" << std::endl;
        std::cout << "  visible." << std::endl;
        std::cout << std::endl;
        std::cout << "Usage:" << std::endl;
        std::cout << "  " << filename << " [device]" << std::endl;
        std::cout << std::endl;
        std::cout << "    device    integer selecting device from enumeration list (default 0)" << std::endl;
        std::cout << "              highgui capture devices are prefered" << std::endl;
        std::cout << std::endl;
        std::cout << "Keyboard Shortcuts:" << std::endl;
        std::cout << "  v: switch between three debug visualizations" << std::endl;
        std::cout << "  l: load marker configuration from mmarker.txt" << std::endl;
        std::cout << "  d: toggle the detection of non-readable markers" << std::endl;
        std::cout << "  q: quit" << std::endl;
        std::cout << std::endl;

        // Initialise CvTestbed
        CvTestbed::Instance().SetVideoCallback(videocallback);
        CvTestbed::Instance().SetKeyCallback(keycallback);

        // Enumerate possible capture plugins
        CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
        if (plugins.size() < 1) {
            std::cout << "Could not find any capture plugins." << std::endl;
            return 0;
        }

        // Display capture plugins
        std::cout << "Available Plugins: ";
        outputEnumeratedPlugins(plugins);
        std::cout << std::endl;

        // Enumerate possible capture devices
        CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
        if (devices.size() < 1) {
            std::cout << "Could not find any capture devices." << std::endl;
            return 0;
        }

        // Check command line argument for which device to use
        int selectedDevice = defaultDevice(devices);
        if (argc > 1) {
            selectedDevice = atoi(argv[1]);
        }
        if (selectedDevice >= (int)devices.size()) {
            selectedDevice = defaultDevice(devices);
        }
        
        // Display capture devices
        std::cout << "Enumerated Capture Devices:" << std::endl;
        outputEnumeratedDevices(devices, selectedDevice);
        std::cout << std::endl;
        
        // Create capture object from camera
        Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
        std::string uniqueName = devices[selectedDevice].uniqueName();

        // Handle capture lifecycle and start video capture
        // Note that loadSettings/saveSettings are not supported by all plugins
        if (cap) {
            std::stringstream settingsFilename;
            settingsFilename << "camera_settings_" << uniqueName << ".xml";
            
            cap->start();
            cap->setResolution(640, 480);
            
            if (cap->loadSettings(settingsFilename.str())) {
                std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
            }

            std::stringstream title;
            title << "SampleMultiMarker (" << cap->captureDevice().captureType() << ")";

            CvTestbed::Instance().StartVideo(cap, title.str().c_str());

            if (cap->saveSettings(settingsFilename.str())) {
                std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
            }

            cap->stop();
            delete cap;
        }
        else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
        }
        else {
            std::cout << "Could not initialize the selected capture backend." << std::endl;
        }

        return 0;
    }
    catch (const std::exception &e) {
        std::cout << "Exception: " << e.what() << endl;
    }
    catch (...) {
        std::cout << "Exception: unknown" << std::endl;
    }
}
Example #20
void BoidsApp::setup()
{	
	Rand::randomize();
	//setFullScreen(true);
	shouldBeFullscreen = false;
	
	mCenter				= Vec3f( getWindowWidth() * 0.5f, getWindowHeight() * 0.5f, 0.0f );
	mSaveFrames			= false;
	mIsRenderingPrint	= false;
	changeInterval		= 15.0;
	time(&lastChange);
	gravity				= false;
	
	// SETUP CAMERA
	mCameraDistance		= 350.0f;
	mEye				= Vec3f( 0.0f, 0.0f, mCameraDistance );
	mCenter				= Vec3f::zero();
	mUp					= Vec3f::yAxis();
	mCam.setPerspective( 75.0f, getWindowAspectRatio(), 5.0f, 5000.0f );
	lastFrameTime		= getElapsedSeconds();
	deltaT				= 0;
	newFlock			= 0;
	
	mParticleTexture	= gl::Texture( loadImage( loadResource( RES_PARTICLE ) ) );
	
	
	// Initialize the OpenCV input (Below added RS 2010-11-15)
	try {
		//ci::Device device = Capture.
		std::vector<boost::shared_ptr<Capture::Device> > devices = Capture::getDevices();
		
		std::cout << "Capture Devices:" << std::endl;
		std::vector<boost::shared_ptr<Capture::Device> >::iterator device_iterator;
		//for(device_iterator=devices.begin(); device_iterator!=devices.end(); device_iterator++)
		for( size_t i = 0; i < devices.size(); i++ )
		{
			console() << "device " << i << ": " << devices[i]->getName() << std::endl;
		}
		
		//UNCOMMENT FOLLOWING LINE FOR BUILT-IN CAMERA
		//capture = Capture(320,240);
		
		//UNCOMMENT FOLLOWING LINE FOR UNIBRAIN OR WHATEVER
		capture = Capture( 320, 240, devices[0] );	// FIXME this is a dumb way to select a device
		
		capture.start();
		silhouetteDetector = new SilhouetteDetector(320,240);
	} catch ( ... ) {
		console() << "Failed to initialize capture device" << std::endl;
	}
	cvThreshholdLevel = 45;		
	// CREATE PARTICLE CONTROLLER
	flock_one.addBoids( NUM_INITIAL_PARTICLES );
	flock_two.addBoids( NUM_INITIAL_PARTICLES );
	flock_one.setColor(ColorA( CM_RGB, 0.784, 0.0, 0.714, 1.0));
	flock_one.silRepelStrength = -0.50f;
	flock_two.setColor(ColorA( CM_RGB, 0.0, 1.0, 0.0, 1.0));
	imageColor = ColorA( CM_RGB, 0.4, 0.4, 0.4, 1.0);
	
	flock_one.addOtherFlock(&flock_two);
	flock_two.addOtherFlock(&flock_one);
	// SETUP PARAMS
	mParams = params::InterfaceGl( "Flocking", Vec2i( 200, 310 ) );
	mParams.addParam( "Scene Rotation", &mSceneRotation, "opened=1" );//
	mParams.addSeparator();
	mParams.addParam( "Fullscreen", &shouldBeFullscreen,"keyIncr=f" ); //FIXME
	mParams.addSeparator();
	mParams.addParam( "Eye Distance", &mCameraDistance, "min=100.0 max=2000.0 step=50.0 keyIncr=s keyDecr=w" );
	//mParams.addParam( "Center Gravity", &flock_one.centralGravity, "keyIncr=g" );
	//mParams.addParam( "Flatten", &flock_one.flatten, "keyIncr=f" );
	mParams.addSeparator();
	//mParams.addParam( "Zone Radius", &flock_one.zoneRadius, "min=10.0 max=100.0 step=1.0 keyIncr=z keyDecr=Z" );
//	mParams.addParam( "Lower Thresh", &flock_one.lowerThresh, "min=0.025 max=1.0 step=0.025 keyIncr=l keyDecr=L" );
//	mParams.addParam( "Higher Thresh", &flock_one.higherThresh, "min=0.025 max=1.0 step=0.025 keyIncr=h keyDecr=H" );
//	mParams.addSeparator();
//	mParams.addParam( "Attract Strength", &flock_one.attractStrength, "min=0.001 max=0.1 step=0.001 keyIncr=a keyDecr=A" );
//	mParams.addParam( "Repel Strength", &flock_one.repelStrength, "min=0.001 max=0.1 step=0.001 keyIncr=r keyDecr=R" );
//	mParams.addParam( "Orient Strength", &flock_one.orientStrength, "min=0.001 max=0.1 step=0.001 keyIncr=o keyDecr=O" );
//	mParams.addSeparator();
	mParams.addParam( "CV Threshhold", &silhouetteDetector->cvThresholdLevel, "min=0 max=255 step=1 keyIncr=t keyDecr=T" );
	
	//setup transformation from camera space to opengl world space
	imageToScreenMap.setToIdentity();
	imageToScreenMap.translate(Vec3f(getWindowSize().x/2, getWindowSize().y/2, 0));	//translate over and down
	imageToScreenMap.scale(Vec3f(-1*getWindowSize().x/320.0f, -1*getWindowSize().y/240.0f,1.0f));	//scale up
	polygons = new vector<Vec2i_ptr_vec>();
	
	currentBoidRuleNumber = 0;
	
	// ** stuff to create boid rulesets ** //
	
	// State 1: stuckOnYou - both flocks are attracted to the silhouette
	BoidSysPair stuckOnYou;
	stuckOnYou.flockOneProps.zoneRadius			= 80.0f;
	stuckOnYou.flockOneProps.lowerThresh		= 0.5f;
	stuckOnYou.flockOneProps.higherThresh		= 0.8f;
	stuckOnYou.flockOneProps.attractStrength	= 0.004f;
	stuckOnYou.flockOneProps.repelStrength		= 0.01f;
	stuckOnYou.flockOneProps.orientStrength		= 0.01f;
	stuckOnYou.flockOneProps.silThresh			= 1000.0f;
	stuckOnYou.flockOneProps.silRepelStrength	= -0.50f;
	stuckOnYou.flockOneProps.gravity			= false;
	stuckOnYou.flockOneProps.baseColor			= ColorA( CM_RGB, 0.784, 0.0, 0.714, 1.0);
	
	stuckOnYou.flockTwoProps=stuckOnYou.flockOneProps;
	stuckOnYou.imageColor						= ColorA( 0.08f, 0.0f, 0.1f, 1.0f);
	boidRulesets.push_back(stuckOnYou);
	
	// State 2: repel - both flocks are repelled by the silhouette
	BoidSysPair repel;
	repel.flockOneProps.zoneRadius				= 80.0f;
	repel.flockOneProps.lowerThresh				= 0.5f;
	repel.flockOneProps.higherThresh			= 0.8f;
	repel.flockOneProps.attractStrength			= 0.004f;
	repel.flockOneProps.repelStrength			= 0.01f;
	repel.flockOneProps.orientStrength			= 0.01f;
	repel.flockOneProps.silThresh				= 500.0f;
	repel.flockOneProps.silRepelStrength		= 1.00f;
	repel.flockOneProps.gravity					= false;
	repel.flockOneProps.baseColor				= ColorA( CM_RGB, 0.157, 1.0, 0.0,1.0);

	repel.flockTwoProps=repel.flockOneProps;
	repel.imageColor							= ColorA( 0.08f, 0.0f, 0.1f, 1.0f);	
	boidRulesets.push_back(repel);
	
	
	//// State 3: grav - like repel, but with gravity added
	BoidSysPair grav;
	grav.flockOneProps=repel.flockOneProps;
	grav.flockOneProps.gravity					= true;
	grav.flockOneProps.repelStrength			= 0.005f;
	grav.flockOneProps.baseColor				= ColorA( CM_RGB, 0.157, 0.0, 1.0,1.0);
	grav.flockTwoProps=grav.flockOneProps;
	grav.imageColor								= ColorA( 0.08f, 0.0f, 0.1f, 1.0f);
	boidRulesets.push_back(grav);
	
	
	//// State 4: diff - one flock is attracted to the silhouette and the other is repelled
	BoidSysPair diff;
	
	diff.flockOneProps=repel.flockOneProps;
	diff.flockTwoProps=stuckOnYou.flockOneProps;
	diff.imageColor								= ColorA( 0.08f, 0.0f, 0.1f, 1.0f);
	boidRulesets.push_back(diff);
	 
	
}
Example #21
int main(int argc, char *argv[])
{
    try {
        // Output usage message
        std::string filename(argv[0]);
        filename = filename.substr(filename.find_last_of('\\') + 1);
        std::cout << "SampleTrack" << std::endl;
        std::cout << "===========" << std::endl;
        std::cout << std::endl;
        std::cout << "Description:" << std::endl;
        std::cout << "  This is an example of how to use the 'TrackerPsa', 'TrackerPsaRot'," << std::endl;
        std::cout << "  'TrackerFeatures', 'TrackerStat' and 'TrackerStatRot' classes to" << std::endl;
        std::cout << "  track the optical flow of the video." << std::endl;
        std::cout << std::endl;
        std::cout << "Usage:" << std::endl;
        std::cout << "  " << filename << " [device]" << std::endl;
        std::cout << std::endl;
        std::cout << "    device    integer selecting device from enumeration list (default 0)" << std::endl;
        std::cout << "              highgui capture devices are preferred" << std::endl;
        std::cout << std::endl;
        std::cout << "Keyboard Shortcuts:" << std::endl;
        std::cout << "  r,t: reset tracker" << std::endl;
        std::cout << "  n,space: cycle through tracking algorithms" << std::endl;
        std::cout << "  q: quit" << std::endl;
        std::cout << std::endl;

        // Initialize font
        cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0);

        // Initialise CvTestbed
        CvTestbed::Instance().SetVideoCallback(videocallback);
        CvTestbed::Instance().SetKeyCallback(keycallback);

        // Enumerate possible capture plugins
        CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
        if (plugins.size() < 1) {
            std::cout << "Could not find any capture plugins." << std::endl;
            return 0;
        }

        // Display capture plugins
        std::cout << "Available Plugins: ";
        outputEnumeratedPlugins(plugins);
        std::cout << std::endl;

        // Enumerate possible capture devices
        CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
        if (devices.size() < 1) {
            std::cout << "Could not find any capture devices." << std::endl;
            return 0;
        }

        // Check command line argument for which device to use
        int selectedDevice = defaultDevice(devices);
        if (argc > 1) {
            selectedDevice = atoi(argv[1]);
        }
        if (selectedDevice >= (int)devices.size()) {
            selectedDevice = defaultDevice(devices);
        }
        
        // Display capture devices
        std::cout << "Enumerated Capture Devices:" << std::endl;
        outputEnumeratedDevices(devices, selectedDevice);
        std::cout << std::endl;
        
        // Create capture object from camera
        Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
        std::string uniqueName = devices[selectedDevice].uniqueName();

        // Handle capture lifecycle and start video capture
        // Note that loadSettings/saveSettings are not supported by all plugins
        if (cap) {
            std::stringstream settingsFilename;
            settingsFilename << "camera_settings_" << uniqueName << ".xml";
            
            cap->start();
            cap->setResolution(640, 480);
            
            if (cap->loadSettings(settingsFilename.str())) {
                std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
            }

            std::stringstream title;
            title << "SampleTrack (" << cap->captureDevice().captureType() << ")";

            CvTestbed::Instance().StartVideo(cap, title.str().c_str());

            if (cap->saveSettings(settingsFilename.str())) {
                std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
            }

            cap->stop();
            delete cap;
        }
        else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
        }
        else {
            std::cout << "Could not initialize the selected capture backend." << std::endl;
        }

        return 0;
    }
    catch (const std::exception &e) {
        std::cout << "Exception: " << e.what() << endl;
    }
    catch (...) {
        std::cout << "Exception: unknown" << std::endl;
    }
}
Example #22
int main(int argc, char *argv[])
{
    try {
        // Output usage message
        std::string filename(argv[0]);
        filename = filename.substr(filename.find_last_of('\\') + 1);
        std::cout << "SampleIntegralImage" << std::endl;
        std::cout << "===================" << std::endl;
        std::cout << std::endl;
        std::cout << "Description:" << std::endl;
        std::cout << "  This is an example of how to use the 'IntegralImage' and" << std::endl;
        std::cout << "  'IntegralGradient' classes. The vertical (green) and horizontal (red)" << std::endl;
        std::cout << "  whole image projections are computed using 'IntegralImage::GetSubimage'" << std::endl;
        std::cout << "  and shown in the SampleIntegralImage window. The gradients of the" << std::endl;
        std::cout << "  image edges are shown in the Gradient window. The edges are detected" << std::endl;
        std::cout << "  using the Canny edge detector where t1 and t2 are parameters for the" << std::endl;
        std::cout << "  Canny algorithm. The gradients are drawn in red and their local normals" << std::endl;
        std::cout << "  are drawn in blue." << std::endl;
        std::cout << std::endl;
        std::cout << "Usage:" << std::endl;
        std::cout << "  " << filename << " [device]" << std::endl;
        std::cout << std::endl;
        std::cout << "    device    integer selecting device from enumeration list (default 0)" << std::endl;
        std::cout << "              highgui capture devices are preferred" << std::endl;
        std::cout << std::endl;
        std::cout << "Keyboard Shortcuts:" << std::endl;
        std::cout << "  0: show/hide gradient image" << std::endl;
        std::cout << "  1: show/hide grayscale image" << std::endl;
        std::cout << "  2: show/hide vertical image" << std::endl;
        std::cout << "  3: show/hide horizontal image" << std::endl;
        std::cout << "  4: show/hide canny image" << std::endl;
        std::cout << "  q: quit" << std::endl;
        std::cout << std::endl;

        // Initialise CvTestbed
        CvTestbed::Instance().SetVideoCallback(videocallback);

        // Enumerate possible capture plugins
        CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
        if (plugins.size() < 1) {
            std::cout << "Could not find any capture plugins." << std::endl;
            return 0;
        }

        // Display capture plugins
        std::cout << "Available Plugins: ";
        outputEnumeratedPlugins(plugins);
        std::cout << std::endl;

        // Enumerate possible capture devices
        CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
        if (devices.size() < 1) {
            std::cout << "Could not find any capture devices." << std::endl;
            return 0;
        }

        // Check command line argument for which device to use
        int selectedDevice = defaultDevice(devices);
        if (argc > 1) {
            selectedDevice = atoi(argv[1]);
        }
        if (selectedDevice >= (int)devices.size()) {
            selectedDevice = defaultDevice(devices);
        }
        
        // Display capture devices
        std::cout << "Enumerated Capture Devices:" << std::endl;
        outputEnumeratedDevices(devices, selectedDevice);
        std::cout << std::endl;
        
        // Create capture object from camera
        Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
        std::string uniqueName = devices[selectedDevice].uniqueName();

        // Handle capture lifecycle and start video capture
        // Note that loadSettings/saveSettings are not supported by all plugins
        if (cap) {
            std::stringstream settingsFilename;
            settingsFilename << "camera_settings_" << uniqueName << ".xml";
            
            cap->start();
            cap->setResolution(640, 480);
            
            if (cap->loadSettings(settingsFilename.str())) {
                std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
            }

            std::stringstream title;
            title << "SampleIntegralImage (" << cap->captureDevice().captureType() << ")";

            CvTestbed::Instance().StartVideo(cap, title.str().c_str());

            if (cap->saveSettings(settingsFilename.str())) {
                std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
            }

            cap->stop();
            delete cap;
        }
        else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
        }
        else {
            std::cout << "Could not initialize the selected capture backend." << std::endl;
        }

        return 0;
    }
    catch (const std::exception &e) {
        std::cout << "Exception: " << e.what() << endl;
    }
    catch (...) {
        std::cout << "Exception: unknown" << std::endl;
    }
}
Example #23
int main(int argc, char *argv[])
{
    try {
        // Output usage message
        std::string filename(argv[0]);
        filename = filename.substr(filename.find_last_of('\\') + 1);

        // Initialise GlutViewer and CvTestbed
        GlutViewer::Start(argc, argv, 640, 480);
        CvTestbed::Instance().SetVideoCallback(videocallback);

        // Create capture object from camera (argv[1] is a number) or from file (argv[1] is a string)
        Capture *cap;
        std::string uniqueName;
        if ((argc > 1) && (!isdigit(argv[1][0]))) {
            // Manually create capture device and initialize capture object
            CaptureDevice device("file", argv[1]);
            cap = CaptureFactory::instance()->createCapture(device);
            uniqueName = "file";
        }
        else {    
            // Enumerate possible capture plugins
            CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
            if (plugins.size() < 1) {
                std::cout << "Could not find any capture plugins." << std::endl;
                return 0;
            }

            // Display capture plugins
            std::cout << "Available Plugins: ";
            outputEnumeratedPlugins(plugins);
            std::cout << std::endl;

            // Enumerate possible capture devices
            CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
            if (devices.size() < 1) {
                std::cout << "Could not find any capture devices." << std::endl;
                return 0;
            }

            // Check command line argument for which device to use
            int selectedDevice = defaultDevice(devices);
            if (argc > 1) {
                selectedDevice = atoi(argv[1]);
            }
            if (selectedDevice >= (int)devices.size()) {
                selectedDevice = defaultDevice(devices);
            }
            
            // Display capture devices
            std::cout << "Enumerated Capture Devices:" << std::endl;
            outputEnumeratedDevices(devices, selectedDevice);
            std::cout << std::endl;
            
            // Create capture object from camera
            cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
            uniqueName = devices[selectedDevice].uniqueName();
        }

        // Handle capture lifecycle and start video capture
        // Note that loadSettings/saveSettings are not supported by all plugins
        if (cap) {
            std::stringstream settingsFilename;
            settingsFilename << "camera_settings_" << uniqueName << ".xml";
            
            cap->start();
            cap->setResolution(640, 480);
            
            if (cap->loadSettings(settingsFilename.str())) {
                std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
            }

            std::stringstream title;
            title << "SampleMarkerDetector (" << cap->captureDevice().captureType() << ")";

            CvTestbed::Instance().StartVideo(cap, title.str().c_str());

            if (cap->saveSettings(settingsFilename.str())) {
                std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
            }

            cap->stop();
            delete cap;
        }
        else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
        }
        else {
            std::cout << "Could not initialize the selected capture backend." << std::endl;
        }

        return 0;
    }
    catch (const std::exception &e) {
        std::cout << "Exception: " << e.what() << endl;
    }
    catch (...) {
        std::cout << "Exception: unknown" << std::endl;
    }
}