/*
	Process each captured video frame.
*/
void Process()
{
	reset = true;
	while (true)
	{
		frame = capture->captureImage();
		if (frame == 0)
			continue; // TODO: CvCam occasionally returns NULL frames; skip them.
		if(frame->origin==1) 
		{ 
			cvFlip(frame); 
			frame->origin=0;
		}
		if (frame->nChannels == 1) 
		{
			gray->imageData = frame->imageData;
			cvCvtColor(frame, rgb, CV_BayerGB2RGB); // TODO: this assumes the single-channel input uses a Bayer (GB) pattern
		} 
		else 
		{
			rgb->imageData = frame->imageData;
			cvCvtColor(frame, gray, CV_RGB2GRAY);
		}
		gray->origin = frame->origin;

		if (reset)
		{
			// First frame after a (re)start; per-sequence initialization could go here.
			reset = false;
		}

		alvar::Pose pose;
		double error = GetMultiMarkerPose(frame, &cam, pose);
		bool track_ok = (error >= 0.0 && error < 5.0); // a negative error means no pose was found; large errors are rejected

		if (track_ok)
		{
			// Draw cameras, points & features
			double gl_mat[16];
			pose.GetMatrixGL(gl_mat);
			model_transform->setMatrix(osg::Matrix(gl_mat));
			model_switch->setAllChildrenOn();
		}
		else
		{
			model_switch->setAllChildrenOff();
//			std::cout << "\rTrack Failed     ";
		}

		viewBG->DrawImage(rgb);
		if (viewer->done())
			break;
		viewer->frame();
		if (stop_running) 
			break;
	}
}
// Main: set up capture, calibration and the OSG scene graph, then run the render loop.
int main(int argc, char** argv)
{
	osg::ArgumentParser arguments(&argc, argv);

	// Let's use ALVAR's convenience capture system.
	// We initialize HighGUI through the CaptureFactory (see the manual for other
	// options, such as AVI files; see the hypothetical sketch after the device check below).
	alvar::CaptureFactory *factory = alvar::CaptureFactory::instance();
	alvar::CaptureFactory::CaptureDeviceVector devices = factory->enumerateDevices("highgui");

	// Check that at least one capture device was found
	if (devices.size() > 0) {
		capture = factory->createCapture(devices.front());
	}
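
	// The comment above mentions other capture options, such as AVI files. As a purely
	// hypothetical sketch (the "file" plugin name and the direct CaptureDevice construction
	// are assumptions; check the manual for the plugins your ALVAR build actually provides),
	// a video file could be opened along these lines:
	//
	//   alvar::CaptureDevice fileDevice("file", "video.avi");
	//   capture = factory->createCapture(fileDevice);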

	// Capture is a central feature, so bail out if it cannot be started.
	if (capture && capture->start()) {

		// Let's capture one frame to get the video resolution. Note that captureImage()
		// may return NULL for the first frames (see Process() above), so retry until a valid frame arrives.
		IplImage *tempImg = capture->captureImage();
		while (!tempImg) {
			tempImg = capture->captureImage();
		}
		videoXRes = tempImg->width;
		videoYRes = tempImg->height;

		// Camera calibration. See the manual and the ALVAR samples for how to calibrate your
		// camera; calibration makes marker detection and pose estimation more accurate.
		if (! camera.SetCalib("calib.xml", videoXRes, videoYRes)) {
			// No calibration file found: fall back to default parameters at this resolution.
			camera.SetRes(videoXRes, videoYRes);
		}
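
		// For reference, a commented-out sketch of how a file like "calib.xml" could be
		// produced offline, roughly following ALVAR's SampleCamCalib (chessboard dimensions,
		// square size and exact signatures are assumptions and may differ between versions):
		//
		//   alvar::ProjPoints pp;
		//   alvar::Camera calibCam;
		//   // For each captured chessboard view:
		//   //   pp.AddPointsUsingChessboard(calibFrame, 2.8, 6, 8, true);
		//   calibCam.Calibrate(pp);
		//   calibCam.SaveCalib("calib.xml");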

		// Create the osg::Image for the video
		videoImage = new osg::Image;

		// Create the osg::Image for the texture (marker hiding)
		texImage = new osg::Image;

		// IplImage used for the texture generation (marker hiding)
		markerHiderImage = cvCreateImage(cvSize(64, 64), IPL_DEPTH_8U, 4);

		// construct the viewer
		viewer = new osgViewer::Viewer(arguments);
		// Use (approximately) the video resolution as the window size.
		viewer->setUpViewInWindow(200, 200, videoXRes, videoYRes);
		// The viewport uses the same size
		viewer->getCamera()->setViewport(0, 0, videoXRes, videoYRes);
		viewer->setLightingMode(osg::View::HEADLIGHT);

		// Attach our own event handler so we can catch window resize events
		viewer->addEventHandler(new CSimpleWndSizeHandler(videoXRes,videoYRes ));
		
		// Set projection matrix as ALVAR recommends (based on the camera calibration)
		double p[16];
		camera.GetOpenglProjectionMatrix(p,videoXRes,videoYRes);
		viewer->getCamera()->setProjectionMatrix(osg::Matrix(p));

		// Create main root for everything
		arRoot = new osg::Group;
		arRoot->setName("ALVAR stuff (c) VTT");
		
		// Init the video background class and add it to the graph
		videoBG.Init(videoXRes, videoYRes, (tempImg->origin ? true : false));
		arRoot->addChild(videoBG.GetOSGGroup());

		// Create the model switch and add it to the graph
		modelSwitch = new osg::Switch;
		arRoot->addChild(modelSwitch.get());

		// Create model transformations for the markers and add them under the switch
		mtForMarkerFive = new osg::MatrixTransform;
		mtForMarkerTen = new osg::MatrixTransform;
		modelSwitch->addChild(mtForMarkerFive.get());
		modelSwitch->addChild(mtForMarkerTen.get());

		// Add the texture drawable under the transformation node of marker five
		mtForMarkerFive->addChild(texOnMarker.GetDrawable());

		// All models off
		modelSwitch->setAllChildrenOff();

		// Load the models
		modelForMarkerFive = osgDB::readNodeFile("grid.osg");
		modelForMarkerTen = osgDB::readNodeFile("axes.osg");
	    
		// If loading succeeded, add the models under the MatrixTransform nodes.
		if(modelForMarkerFive)
			mtForMarkerFive->addChild(modelForMarkerFive.get());

		if(modelForMarkerTen)
			mtForMarkerTen->addChild(modelForMarkerTen.get());
	    
		// Tell ALVAR the marker size (the same for all markers).
		// You can also specify a different size for each individual marker; see the sketch below.
		markerDetector.SetMarkerSize(MARKER_SIZE);
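
		// A commented-out sketch of per-marker sizes. Marker IDs 5 and 10 match the transforms
		// above; whether SetMarkerSizeForId is available depends on your ALVAR version:
		//
		//   markerDetector.SetMarkerSizeForId(5, MARKER_SIZE);
		//   markerDetector.SetMarkerSizeForId(10, MARKER_SIZE * 2);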

		// Set scene data 
		viewer->setSceneData(arRoot.get());

		// Start the main loop
		while (!viewer->done()) {
			// Call the rendering function over and over again
			renderer();
		}
	} 
	// Time to close the system
	if(capture){
		capture->stop();
		delete capture;
	}

	if(markerHiderImage)
		cvReleaseImage(&markerHiderImage);
	
	return 0; // bye bye. Happy coding!
}