// Example #1
// ------------------------------------------------------
//				Test_Kinect
// ------------------------------------------------------
void Test_Kinect()
{
	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar;
	std::thread thHandle = std::thread(thread_grabbing, std::ref(thrPar));

	// Wait until data stream starts so we can say for sure the sensor has been
	// initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do
	{
		CObservation3DRangeScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP)
			break;
		else
			std::this_thread::sleep_for(10ms);
	} while (!thrPar.quit);

	// Check error condition:
	if (thrPar.quit) return;

	// Feature tracking variables:
	CFeatureList trackedFeats;
	unsigned int step_num = 0;

	bool SHOW_FEAT_IDS = true;
	bool SHOW_RESPONSES = true;

	CGenericFeatureTrackerAutoPtr tracker;

	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr(new CFeatureTracker_KL);
	tracker->enableTimeLogger(true);  // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// To see all the existing params and documentation, see
	// mrpt::vision::CGenericFeatureTracker
	//  http://reference.mrpt.org/devel/structmrpt_1_1vision_1_1_c_generic_feature_tracker.html
	tracker->extra_params["add_new_features"] =
		1;  // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"] = 25;
	tracker->extra_params["add_new_feat_max_features"] = 150;
	tracker->extra_params["add_new_feat_patch_size"] = 21;

	tracker->extra_params["minimum_KLT_response_to_add"] = 40;
	tracker->extra_params["check_KLT_response_every"] =
		5;  // Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"] =
		25;  // Re-check the KLT-response to assure features are in good points.

	tracker->extra_params["update_patches_every"] = 0;  // Update patches

	// Specific params for "CFeatureTracker_KL"
	tracker->extra_params["window_width"] = 25;
	tracker->extra_params["window_height"] = 25;

	// Global points map:
	CColouredPointsMap globalPtsMap;
	globalPtsMap.colorScheme.scheme =
		CColouredPointsMap::cmFromIntensityImage;  // Take points color from
	// RGB+D observations
	// globalPtsMap.colorScheme.scheme =
	// CColouredPointsMap::cmFromHeightRelativeToSensorGray;

	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D win3D("kinect-3d-slam 3D view", 800, 600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(2.5, 0, 0);

	mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points->setPointSize(2.5);

	mrpt::opengl::CSetOfObjects::Ptr gl_curFeats =
		mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();
	mrpt::opengl::CSetOfObjects::Ptr gl_keyframes =
		mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();

	mrpt::opengl::CPointCloudColoured::Ptr gl_points_map =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points_map->setPointSize(2.0);

	const double aspect_ratio =
		480.0 / 640.0;  // kinect.rows() / double( kinect.cols() );

	mrpt::opengl::CSetOfObjects::Ptr gl_cur_cam_corner =
		mrpt::opengl::stock_objects::CornerXYZSimple(0.4f, 4);

	opengl::COpenGLViewport::Ptr viewInt;
	{
		mrpt::opengl::COpenGLScene::Ptr& scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert(gl_points_map);
		scene->insert(gl_points);
		scene->insert(gl_curFeats);
		scene->insert(gl_keyframes);
		scene->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());

		scene->insert(gl_cur_cam_corner);

		const int VW_WIDTH =
			350;  // Size of the viewport into the window, in pixel units.
		const int VW_HEIGHT = aspect_ratio * VW_WIDTH;

		// Create the Opengl objects for the planar images each in a separate
		// viewport:
		viewInt = scene->createViewport("view2d_int");
		viewInt->setViewportPosition(2, 2, VW_WIDTH, VW_HEIGHT);
		viewInt->setTransparent(true);

		win3D.unlockAccess3DScene();
		win3D.repaint();
	}

	CImage previous_image;

	map<TFeatureID, TPoint3D> lastVisibleFeats;
	std::vector<TPose3D>
		camera_key_frames_path;  // The 6D path of the Kinect camera.
	CPose3D
		currentCamPose_wrt_last;  // wrt last pose in "camera_key_frames_path"

	bool gl_keyframes_must_refresh =
		true;  // Need to update gl_keyframes from camera_key_frames_path??
	CObservation3DRangeScan::Ptr last_obs;
	string str_status, str_status2;

	while (win3D.isOpen() && !thrPar.quit)
	{
		CObservation3DRangeScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP &&
			(!last_obs || possiblyNewObs->timestamp != last_obs->timestamp))
		{
			// It IS a new observation:
			last_obs = possiblyNewObs;

			// Feature tracking -------------------------------------------
			ASSERT_(last_obs->hasIntensityImage);

			CImage theImg;  // The grabbed image:
			theImg = last_obs->intensityImage;

			// Do tracking:
			if (step_num > 1)  // we need "previous_image" to be valid.
			{
				tracker->trackFeatures(previous_image, theImg, trackedFeats);

				// Remove those now out of the image plane:
				CFeatureList::iterator itFeat = trackedFeats.begin();
				while (itFeat != trackedFeats.end())
				{
					const TFeatureTrackStatus status = (*itFeat)->track_status;
					bool eras =
						(status_TRACKED != status && status_IDLE != status);
					if (!eras)
					{
						// Also, check if it's too close to the image border:
						const float x = (*itFeat)->x;
						const float y = (*itFeat)->y;
						static const float MIN_DIST_MARGIN_TO_STOP_TRACKING =
							10;
						if (x < MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							y < MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							x > (last_obs->cameraParamsIntensity.ncols -
								 MIN_DIST_MARGIN_TO_STOP_TRACKING) ||
							y > (last_obs->cameraParamsIntensity.nrows -
								 MIN_DIST_MARGIN_TO_STOP_TRACKING))
						{
							eras = true;
						}
					}
					if (eras)  // Erase or keep?
						itFeat = trackedFeats.erase(itFeat);
					else
						++itFeat;
				}
			}

			// Create list of 3D features in space, wrt current camera pose:
			// --------------------------------------------------------------------
			map<TFeatureID, TPoint3D> curVisibleFeats;
			for (CFeatureList::iterator itFeat = trackedFeats.begin();
				 itFeat != trackedFeats.end(); ++itFeat)
			{
				// Pixel coordinates in the intensity image:
				const int int_x = (*itFeat)->x;
				const int int_y = (*itFeat)->y;

				// Convert to pixel coords in the range image:
				//  APPROXIMATION: Assume coordinates are equal (that's not
				//  exact!!)
				const int x = int_x;
				const int y = int_y;

				// Does this (x,y) have valid range data?
				const float d = last_obs->rangeImage(y, x);
				if (d > 0.05 && d < 10.0)
				{
					ASSERT_(
						size_t(
							last_obs->rangeImage.cols() *
							last_obs->rangeImage.rows()) ==
						last_obs->points3D_x.size());
					const size_t nPt = last_obs->rangeImage.cols() * y + x;
					curVisibleFeats[(*itFeat)->ID] = TPoint3D(
						last_obs->points3D_x[nPt], last_obs->points3D_y[nPt],
						last_obs->points3D_z[nPt]);
				}
			}

			// Load local points map from 3D points + color:
			CColouredPointsMap localPntsMap;
			localPntsMap.colorScheme.scheme =
				CColouredPointsMap::cmFromIntensityImage;
			localPntsMap.loadFromRangeScan(*last_obs);

			// Estimate our current camera pose from feature2feature matching:
			// --------------------------------------------------------------------
			if (!lastVisibleFeats.empty())
			{
				TMatchingPairList corrs;  // pairs of correspondences

				for (map<TFeatureID, TPoint3D>::const_iterator itCur =
						 curVisibleFeats.begin();
					 itCur != curVisibleFeats.end(); ++itCur)
				{
					map<TFeatureID, TPoint3D>::const_iterator itFound =
						lastVisibleFeats.find(itCur->first);
					if (itFound != lastVisibleFeats.end())
					{
						corrs.push_back(
							TMatchingPair(
								itFound->first, itCur->first, itFound->second.x,
								itFound->second.y, itFound->second.z,
								itCur->second.x, itCur->second.y,
								itCur->second.z));
					}
				}

				if (corrs.size() >= 3)
				{
					// Find matchings:
					mrpt::tfest::TSE3RobustParams params;
					params.ransac_minSetSize = 3;
					params.ransac_maxSetSizePct = 6.0 / corrs.size();

					mrpt::tfest::TSE3RobustResult results;
					bool register_ok = false;
					try
					{
						mrpt::tfest::se3_l2_robust(corrs, params, results);
						register_ok = true;
					}
					catch (std::exception&)
					{
						/* Cannot find a minimum number of matches, inconsistent
						 * parameters due to very reduced numberof matches,etc.
						 */
					}

					const CPose3D relativePose =
						CPose3D(results.transformation);

					str_status = mrpt::format(
						"%d corrs | inliers: %d | rel.pose: %s ",
						int(corrs.size()), int(results.inliers_idx.size()),
						relativePose.asString().c_str());
					str_status2 = string(
						results.inliers_idx.size() == 0
							? "LOST! Please, press 'r' to restart"
							: "");

					if (register_ok && std::abs(results.scale - 1.0) < 0.1)
					{
						// Seems a good match:
						if ((relativePose.norm() > KEYFRAMES_MIN_DISTANCE ||
							 std::abs(relativePose.yaw()) > KEYFRAMES_MIN_ANG ||
							 std::abs(relativePose.pitch()) >
								 KEYFRAMES_MIN_ANG ||
							 std::abs(relativePose.roll()) > KEYFRAMES_MIN_ANG))
						{
							// Accept this as a new key-frame pose ------------
							// Append new global pose of this key-frame:

							const CPose3D new_keyframe_global =
								CPose3D(*camera_key_frames_path.rbegin()) +
								relativePose;

							camera_key_frames_path.push_back(
								new_keyframe_global.asTPose());

							gl_keyframes_must_refresh = true;

							currentCamPose_wrt_last =
								CPose3D();  // It's (0,0,0) since the last
							// key-frame is the current pose!
							lastVisibleFeats = curVisibleFeats;

							cout << "Adding new key-frame: pose="
								 << new_keyframe_global << endl;

							// Update global map: append another map at a given
							// position:
							globalPtsMap.insertObservation(
								last_obs.get(), &new_keyframe_global);
							win3D.get3DSceneAndLock();
							gl_points_map->loadFromPointsMap(&globalPtsMap);
							win3D.unlockAccess3DScene();
						}
						else
						{
							currentCamPose_wrt_last = relativePose;
							// cout << "cur pose: " << currentCamPose_wrt_last
							// << endl;
						}
					}
				}
			}

			if (camera_key_frames_path.empty() || lastVisibleFeats.empty())
			{
				// First iteration:
				camera_key_frames_path.clear();
				camera_key_frames_path.push_back(TPose3D(0, 0, 0, 0, 0, 0));
				gl_keyframes_must_refresh = true;
				lastVisibleFeats = curVisibleFeats;

				// Update global map:
				globalPtsMap.clear();
				globalPtsMap.insertObservation(last_obs.get());

				win3D.get3DSceneAndLock();
				gl_points_map->loadFromPointsMap(&globalPtsMap);
				win3D.unlockAccess3DScene();
			}

			// Save the image for the next step:
			previous_image = theImg;

			// Draw marks on the RGB image:
			theImg.selectTextFont("10x20");
			{  // Tracked feats:
				theImg.drawFeatures(
					trackedFeats, TColor(0, 0, 255), SHOW_FEAT_IDS,
					SHOW_RESPONSES);
				theImg.textOut(
					3, 22,
					format("# feats: %u", (unsigned int)trackedFeats.size()),
					TColor(200, 20, 20));
			}

			// Update visualization ---------------------------------------

			// Show intensity image
			win3D.get3DSceneAndLock();
			viewInt->setImageView(theImg);
			win3D.unlockAccess3DScene();

			// Show 3D points & current visible feats, at the current camera 3D
			// pose "currentCamPose_wrt_last"
			// ---------------------------------------------------------------------
			if (last_obs->hasPoints3D)
			{
				const CPose3D curGlobalPose =
					CPose3D(*camera_key_frames_path.rbegin()) +
					currentCamPose_wrt_last;
				win3D.get3DSceneAndLock();
				// All 3D points:
				gl_points->loadFromPointsMap(&localPntsMap);
				gl_points->setPose(curGlobalPose);
				gl_cur_cam_corner->setPose(curGlobalPose);

				// Current visual landmarks:
				gl_curFeats->clear();
				for (map<TFeatureID, TPoint3D>::const_iterator it =
						 curVisibleFeats.begin();
					 it != curVisibleFeats.end(); ++it)
				{
					static double D = 0.02;
					mrpt::opengl::CBox::Ptr box =
						mrpt::make_aligned_shared<mrpt::opengl::CBox>(
							TPoint3D(-D, -D, -D), TPoint3D(D, D, D));
					box->setWireframe(true);
					box->setName(format("%d", int(it->first)));
					box->enableShowName(true);
					box->setLocation(it->second);
					gl_curFeats->insert(box);
				}
				gl_curFeats->setPose(curGlobalPose);

				win3D.unlockAccess3DScene();
				win3D.repaint();
			}

			win3D.get3DSceneAndLock();
			win3D.addTextMessage(
				-100, -20, format("%.02f Hz", thrPar.Hz), TColorf(0, 1, 1), 100,
				MRPT_GLUT_BITMAP_HELVETICA_18);
			win3D.unlockAccess3DScene();

			win3D.repaint();

			step_num++;

		}  // end update visualization:

		if (gl_keyframes_must_refresh)
		{
			gl_keyframes_must_refresh = false;
			// cout << "Updating gl_keyframes with " <<
			// camera_key_frames_path.size() << " frames.\n";
			win3D.get3DSceneAndLock();
			gl_keyframes->clear();
			for (size_t i = 0; i < camera_key_frames_path.size(); i++)
			{
				CSetOfObjects::Ptr obj =
					mrpt::opengl::stock_objects::CornerXYZSimple(0.3f, 3);
				obj->setPose(camera_key_frames_path[i]);
				gl_keyframes->insert(obj);
			}
			win3D.unlockAccess3DScene();
		}

		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit() && thrPar.pushed_key == 0)
		{
			const int key = tolower(win3D.getPushedKey());

			switch (key)
			{
				// Some of the keys are processed in this thread:
				case 'r':
					lastVisibleFeats.clear();
					camera_key_frames_path.clear();
					gl_keyframes_must_refresh = true;
					globalPtsMap.clear();
					win3D.get3DSceneAndLock();
					gl_points_map->loadFromPointsMap(&globalPtsMap);
					win3D.unlockAccess3DScene();

					break;
				case 's':
				{
					const std::string s = "point_cloud.txt";
					cout << "Dumping 3D point-cloud to: " << s << endl;
					globalPtsMap.save3D_to_text_file(s);
					break;
				}
				case 'o':
					win3D.setCameraZoom(win3D.getCameraZoom() * 1.2);
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom(win3D.getCameraZoom() / 1.2);
					win3D.repaint();
					break;
				// ...and the rest in the kinect thread:
				default:
					thrPar.pushed_key = key;
					break;
			};
		}

		win3D.get3DSceneAndLock();
		win3D.addTextMessage(
			2, -30, format(
						"'s':save point cloud, 'r': reset, 'o'/'i': zoom "
						"out/in, mouse: orbit 3D, ESC: quit"),
			TColorf(1, 1, 1), 110, MRPT_GLUT_BITMAP_HELVETICA_12);
		win3D.addTextMessage(
			2, -50, str_status, TColorf(1, 1, 1), 111,
			MRPT_GLUT_BITMAP_HELVETICA_12);
		win3D.addTextMessage(
			2, -70, str_status2, TColorf(1, 1, 1), 112,
			MRPT_GLUT_BITMAP_HELVETICA_18);
		win3D.unlockAccess3DScene();

		std::this_thread::sleep_for(1ms);
	}

	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	thHandle.join();
	cout << "Bye!\n";
}
// ------------------------------------------------------
//		DoTrackingDemo
// ------------------------------------------------------
int DoTrackingDemo(CCameraSensorPtr  cam, bool  DO_SAVE_VIDEO)
{
	win = mrpt::gui::CDisplayWindow3D::Create("Tracked features",800,600);

	mrpt::vision::CVideoFileWriter  vidWritter;

	bool 		hasResolution = false;
	TCamera		cameraParams; // For now, will only hold the image resolution on the arrive of the first frame.

	TSimpleFeatureList  trackedFeats;

	unsigned int	step_num = 0;

	bool  SHOW_FEAT_IDS = true;
	bool  SHOW_RESPONSES = true;
	bool  SHOW_FEAT_TRACKS = true;


	const double SAVE_VIDEO_FPS = 30; // If DO_SAVE_VIDEO=true, the FPS of the video file
	const char*  SAVE_VIDEO_CODEC = "XVID"; // "XVID", "PIM1", "MJPG"

	bool  DO_HIST_EQUALIZE_IN_GRAYSCALE = false;
	string VIDEO_OUTPUT_FILE = "./tracking_video.avi";

	const double MAX_FPS = 5000; // 5.0;  // Hz (to slow down visualization).

	CGenericFeatureTrackerAutoPtr  tracker;

	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr( new CFeatureTracker_KL );

	tracker->enableTimeLogger(true); // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// -------------------------------------------------------------
	// To see all the existing params and documentation, see mrpt::vision::CGenericFeatureTracker
	tracker->extra_params["remove_lost_features"]         = 1;   // automatically remove out-of-image and badly tracked features

	tracker->extra_params["add_new_features"]             = 1;   // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"]  = 32;
	tracker->extra_params["minimum_KLT_response_to_add"]  = 10;
	tracker->extra_params["add_new_feat_max_features"]    = 350;
	tracker->extra_params["add_new_feat_patch_size"]      = 11;

	tracker->extra_params["update_patches_every"]		= 0;  // Don't update patches.

	tracker->extra_params["check_KLT_response_every"]	= 5;	// Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"]	    = 5;

	// Specific params for "CFeatureTracker_KL"
	// ------------------------------------------------------
	tracker->extra_params["window_width"]  = 5;
	tracker->extra_params["window_height"] = 5;
	//tracker->extra_params["LK_levels"] = 3;
	//tracker->extra_params["LK_max_iters"] = 10;
	//tracker->extra_params["LK_epsilon"] = 0.1;
	//tracker->extra_params["LK_max_tracking_error"] = 150;


	// --------------------------------
	// The main loop
	// --------------------------------
	CImage		previous_image;

	TSequenceFeatureObservations    feat_track_history;
	bool							save_tracked_history = true; // Dump feat_track_history to a file at the end

	TCameraPoseID 					curCamPoseId = 0;

	cout << endl << "TO END THE PROGRAM: Close the window.\n";

	mrpt::opengl::COpenGLViewportPtr gl_view;
	{
		mrpt::opengl::COpenGLScenePtr scene = win->get3DSceneAndLock();
		gl_view = scene->getViewport("main");
		win->unlockAccess3DScene();
	}

	// Aux data for drawing the recent track of features:
	static const size_t FEATS_TRACK_LEN = 10;
	std::map<TFeatureID,std::list<TPixelCoord> >  feat_tracks;

	// infinite loop, until we close the win:
	while( win->isOpen() )
	{
		CObservationPtr obs;
		try
		{
			obs= cam->getNextFrame();
		}
		catch (CExceptionEOF &)
		{	// End of a rawlog file.
			break;
		}

		if (!obs)
		{
			cerr << "*Warning* getNextFrame() returned NULL!\n";
			mrpt::system::sleep(50);
			continue;
		}

		CImage theImg;  // The grabbed image:

		if (IS_CLASS(obs,CObservationImage))
		{
			CObservationImagePtr o = CObservationImagePtr(obs);
			theImg.copyFastFrom(o->image);
		}
		else if (IS_CLASS(obs,CObservationStereoImages))
		{
			CObservationStereoImagesPtr o = CObservationStereoImagesPtr(obs);
			theImg.copyFastFrom(o->imageLeft);
		}
		else if (IS_CLASS(obs,CObservation3DRangeScan))
		{
			CObservation3DRangeScanPtr o = CObservation3DRangeScanPtr(obs);
			if (o->hasIntensityImage)
				theImg.copyFastFrom(o->intensityImage);
		}
		else
		{
			continue; // Silently ignore non-image observations.
		}

		// Make sure the image is loaded (for the case it came from a rawlog file)
		if (theImg.isExternallyStored())
			theImg.loadFromFile( theImg.getExternalStorageFileAbsolutePath());

		// Take the resolution upon first valid frame.
		if (!hasResolution)
		{
			hasResolution = true;
			// cameraParams.scaleToResolution()...
			cameraParams.ncols = theImg.getWidth();
			cameraParams.nrows = theImg.getHeight();
		}

		// Do tracking:
		if (step_num>1)  // we need "previous_image" to be valid.
		{
			// This single call makes: detection, tracking, recalculation of KLT_response, etc.
			tracker->trackFeatures(previous_image, theImg, trackedFeats);
		}

		// Save the image for the next step:
		previous_image = theImg;

		// Save history of feature observations:
		tracker->getProfiler().enter("Save history");

		for (size_t i=0;i<trackedFeats.size();++i)
		{
			TSimpleFeature &f = trackedFeats[i];

			const TPixelCoordf pxRaw(f.pt.x,f.pt.y);
			TPixelCoordf  pxUndist;
			//mrpt::vision::pinhole::undistort_point(pxRaw,pxUndist, cameraParams);
			pxUndist = pxRaw;

			feat_track_history.push_back( TFeatureObservation(f.ID,curCamPoseId, pxUndist ) );
		}
		curCamPoseId++;

		tracker->getProfiler().leave("Save history");

		// now that we're done with the image, we can directly write onto it
		//  for the display
		// ----------------------------------------------------------------
		if (DO_HIST_EQUALIZE_IN_GRAYSCALE && !theImg.isColor())
			theImg.equalizeHistInPlace();
		// Convert to color so we can draw color marks, etc.
		theImg.colorImageInPlace();

		double extra_tim_to_wait=0;

		{	// FPS:
			static CTicTac tictac;
			const double T = tictac.Tac();
			tictac.Tic();
			const double fps = 1.0/(std::max(1e-5,T));
			//theImg.filledRectangle(1,1,175,25,TColor(0,0,0));

			const int current_adapt_thres = tracker->getDetectorAdaptiveThreshold();

			theImg.selectTextFont("6x13B");
			theImg.textOut(3,3,format("FPS: %.03f Hz", fps ),TColor(200,200,0) );
			theImg.textOut(3,22,format("# feats: %u - Adaptive threshold: %i", (unsigned int)trackedFeats.size(), current_adapt_thres ),TColor(200,200,0) );

			theImg.textOut(3,41,
				format("# raw feats: %u - Removed: %u",
					(unsigned int)tracker->last_execution_extra_info.raw_FAST_feats_detected,
					(unsigned int)tracker->last_execution_extra_info.num_deleted_feats ),
					TColor(200,200,0) );

			extra_tim_to_wait = 1.0/MAX_FPS - 1.0/fps;
		}

		// Draw feature tracks
		if (SHOW_FEAT_TRACKS)
		{
			// Update new feature coords:
			tracker->getProfiler().enter("drawFeatureTracks");

			std::set<TFeatureID> observed_IDs;

			for (size_t i=0;i<trackedFeats.size();++i)
			{
				const TSimpleFeature &ft = trackedFeats[i];
				std::list<TPixelCoord> & seq = feat_tracks[ft.ID];

				observed_IDs.insert(ft.ID);

				if (seq.size()>=FEATS_TRACK_LEN) seq.erase(seq.begin());
				seq.push_back(ft.pt);

				// Draw:
				if (seq.size()>1)
				{
					const std::list<TPixelCoord>::const_iterator it_end = seq.end();

					std::list<TPixelCoord>::const_iterator it      = seq.begin();
					std::list<TPixelCoord>::const_iterator it_prev = it++;

					for (;it!=it_end;++it)
					{
						theImg.line(it_prev->x,it_prev->y,it->x,it->y, TColor(190,190,190) );
						it_prev = it;
					}
				}
			}

			tracker->getProfiler().leave("drawFeatureTracks");

			// Purge old data:
			for (std::map<TFeatureID,std::list<TPixelCoord> >::iterator it=feat_tracks.begin();it!=feat_tracks.end(); )
			{
				if (observed_IDs.find(it->first)==observed_IDs.end())
				{
					std::map<TFeatureID,std::list<TPixelCoord> >::iterator next_it = it;
					next_it++;
					feat_tracks.erase(it);
					it = next_it;
				}
				else ++it;
			}
		}

		// Draw Tracked feats:
		{
			theImg.selectTextFont("5x7");
			tracker->getProfiler().enter("drawFeatures");
			theImg.drawFeatures(trackedFeats, TColor(0,0,255), SHOW_FEAT_IDS, SHOW_RESPONSES);
			tracker->getProfiler().leave("drawFeatures");
		}


		// Update window:
		win->get3DSceneAndLock();
			gl_view->setImageView(theImg);
		win->unlockAccess3DScene();
		win->repaint();

		// Save debug output video:
		// ----------------------------------
		if (DO_SAVE_VIDEO)
		{
			static bool first = true;
			if (first)
			{
				first=false;
				if (vidWritter.open(
						VIDEO_OUTPUT_FILE,
						SAVE_VIDEO_FPS /* fps */, theImg.getSize(),
						SAVE_VIDEO_CODEC,
						true /* force color video */ ) )
				{
					cout << "[track-video] Saving tracking video to: " << VIDEO_OUTPUT_FILE << endl;
				}
				else
					cerr << "ERROR: Trying to create output video: " << VIDEO_OUTPUT_FILE << endl;
			}

			vidWritter << theImg;
		}

		if (extra_tim_to_wait>0)
			mrpt::system::sleep(1000*extra_tim_to_wait);

		step_num++;
	} // end infinite loop

	// Save tracked feats:
	if (save_tracked_history)
	{
		cout << "Saving tracked features to: tracked_feats.txt..."; cout.flush();
		feat_track_history.saveToTextFile("./tracked_feats.txt");
		cout << "Done!\n"; cout.flush();

#if 0
		// SBA:
		cout << "Saving cams.txt & pts.txt files in SBA library format..."; cout.flush();

		feat_track_history.removeFewObservedFeatures(3);
		feat_track_history.decimateCameraFrames(20);
		feat_track_history.compressIDs();

		TLandmarkLocationsVec  locs;
		TFramePosesVec         cams;
		feat_track_history.saveAsSBAFiles(locs,"pts.txt", cams, "cams.txt");


		cout << "Done!\n"; cout.flush();
#endif
	}


	return 0; // End ok.
}