Code example #1
File: tracking.cpp Project: gamman/MRPT
			template <>
			inline void trackFeatures_checkResponses<TSimpleFeatureList>(TSimpleFeatureList &featureList,const CImage &cur_gray,const float minimum_KLT_response,const unsigned int KLT_response_half_win,const unsigned int max_x, const unsigned int max_y)
			{
				if (featureList.empty()) return;

				for (int N = featureList.size()-1; N>=0 ; --N)
				{
					TSimpleFeature & ft = featureList[N];
					if (ft.track_status!=status_TRACKED)
						continue; // Skip if it's not correctly tracked.

					const unsigned int x = ft.pt.x;
					const unsigned int y = ft.pt.y;
					if (x>KLT_response_half_win && y>KLT_response_half_win && x<max_x && y<max_y)
					{	// Update response:
						ft.response = cur_gray.KLT_response(x,y,KLT_response_half_win);

						// Is it good enough? http://grooveshark.com/s/Goonies+Are+Good+Enough/2beBfO?src=5
						if (ft.response<minimum_KLT_response)
						{	// Nope!
							ft.track_status = status_LOST;
						}
					}
					else
					{	// Out of bounds
						ft.response = 0;
						ft.track_status = status_OOB;
					}
				}
			} // end of trackFeatures_checkResponses<>
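
For context, here is a minimal caller sketch for the helper above. The bound arithmetic mirrors the KLT_half_win handling in code example #6 further down, but the variable names and threshold values are illustrative and not part of the original source:

// Hedged sketch: re-check the KLT responses of the currently tracked features.
// "feats" and "cur_gray" are assumed to be a TSimpleFeatureList and a grayscale CImage.
const unsigned int KLT_half_win = 4;        // illustrative half-window size
const float        min_response = 10.0f;    // illustrative quality threshold
const unsigned int max_x = cur_gray.getWidth()  - 1 - KLT_half_win;
const unsigned int max_y = cur_gray.getHeight() - 1 - KLT_half_win;
trackFeatures_checkResponses<TSimpleFeatureList>(feats, cur_gray, min_response, KLT_half_win, max_x, max_y);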
Code example #2
File: stage2_detect.cpp Project: caomw/stereo-vo
/**  Transform a TSimpleFeatureList into a TKeyPointList (opencv compatible)
  */
void m_convert_featureList_to_keypointList( const TSimpleFeatureList & featList, TKeyPointList & kpList )
{
	kpList.resize( featList.size() );
	TKeyPointList::iterator it1		 = kpList.begin();
	TSimpleFeatureList::const_iterator it2 = featList.begin();
	for( ; it1 != kpList.end(); ++it1, ++it2 )
	{
		it1->pt.x		= it2->pt.x;
		it1->pt.y		= it2->pt.y;
		it1->response	= it2->response;
	} // end-for
} // end--m_convert_featureList_to_keypointList
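
A possible usage sketch for this converter, assuming TKeyPointList is a std::vector<cv::KeyPoint> (the "opencv compatible" remark suggests this, but the typedef is not shown in the snippet):

#include <opencv2/features2d.hpp>   // cv::drawKeypoints
#include <opencv2/highgui.hpp>      // cv::imshow, cv::waitKey

// Hedged sketch: convert MRPT features and hand them to an OpenCV drawing routine.
void draw_converted_features( const cv::Mat & img, const TSimpleFeatureList & featList )
{
	TKeyPointList kpList;
	m_convert_featureList_to_keypointList( featList, kpList );

	cv::Mat out;
	cv::drawKeypoints( img, kpList, out );   // only valid if TKeyPointList is vector<cv::KeyPoint>
	cv::imshow( "features", out );
	cv::waitKey( 0 );
}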
Code example #3
void fast_corner_detect(
	const IplImage* I, TSimpleFeatureList& corners, int barrier, uint8_t octave,
	std::vector<size_t>* out_feats_index_by_row)
	{
		auto ptr = reinterpret_cast<CVD::byte* >(I->imageData);
		CVD::BasicImage<CVD::byte> img(ptr, {I->width, I->height}, I->widthStep);

		std::vector<CVD::ImageRef> outputs;
		// reserve enough corners for every pixel
		outputs.reserve(I->width * I->height);
		F(img, outputs, barrier);
		for(auto & output : outputs)
		{
			corners.push_back_fast(output.x << octave, output.y << octave);
		}
		if(out_feats_index_by_row)
		{
				auto & counters = *out_feats_index_by_row;
				counters.assign(I->height, 0);
				for(auto & output : outputs)
				{
					counters[output.y]++;
				}
		}
	}
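
A brief illustration of the octave re-scaling performed by push_back_fast(output.x << octave, ...) above: a corner detected on a downsampled pyramid level is mapped back to full-resolution coordinates by a left shift, i.e. a multiplication by 2^octave. The numbers below are made up:

// Hedged illustration of the coordinate re-scaling only:
const uint8_t octave  = 2;                   // hypothetical: detection ran on a 1/4-size image
const int     x_level = 37, y_level = 21;
const int     x_full  = x_level << octave;   // 37 * 4 = 148
const int     y_full  = y_level << octave;   // 21 * 4 = 84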
Code example #4
File: tracking.cpp Project: gamman/MRPT
			inline size_t trackFeatures_deleteOOB(
				TSimpleFeatureList &trackedFeats,
				const size_t img_width, const size_t img_height,
				const int MIN_DIST_MARGIN_TO_STOP_TRACKING)
			{
				if (trackedFeats.empty()) return 0;

				std::vector<size_t> survival_idxs;
				const size_t N = trackedFeats.size();

				// 1st: Build list of survival indexes:
				survival_idxs.reserve(N);
				for (size_t i=0;i<N;i++)
				{
					const TSimpleFeature &ft = trackedFeats[i];
					const TFeatureTrackStatus status = ft.track_status;
					bool eras = (status_TRACKED!=status && status_IDLE!=status);
					if (!eras)
					{
						// Also, check if it's too close to the image border:
						const int x= ft.pt.x;
						const int y= ft.pt.y;
						if (x<MIN_DIST_MARGIN_TO_STOP_TRACKING  || y<MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							x>(img_width-MIN_DIST_MARGIN_TO_STOP_TRACKING) ||
							y>(img_height-MIN_DIST_MARGIN_TO_STOP_TRACKING))
						{
							eras = true;
						}
					}
					if (!eras) survival_idxs.push_back(i);
				}

				// 2nd: Build updated list:
				const size_t N2 = survival_idxs.size();
				const size_t n_removed = N-N2;
				for (size_t i=0;i<N2;i++)
				{
					if (survival_idxs[i]!=i)
						trackedFeats[i] = trackedFeats[ survival_idxs[i] ];
				}
				trackedFeats.resize(N2);
				return n_removed;
			} // end of trackFeatures_deleteOOB
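
The survivor-index compaction above is equivalent to the standard erase-remove idiom. A minimal sketch on a plain std::vector<TSimpleFeature>, assuming the feature fields and status constants behave as in the snippet (TSimpleFeatureList itself may wrap such a vector, but that is not shown here):

#include <algorithm>
#include <vector>

// Hedged equivalent of trackFeatures_deleteOOB; "margin" plays the role of
// MIN_DIST_MARGIN_TO_STOP_TRACKING. Returns the number of removed features.
size_t delete_oob( std::vector<TSimpleFeature> & feats,
                   const size_t img_width, const size_t img_height, const int margin )
{
	const size_t N = feats.size();
	const auto is_bad = [&]( const TSimpleFeature & ft )
	{
		if (ft.track_status != status_TRACKED && ft.track_status != status_IDLE)
			return true;                          // badly tracked -> drop
		const int x = ft.pt.x, y = ft.pt.y;       // too close to the image border -> drop
		return x < margin || y < margin ||
		       x > int(img_width)  - margin ||
		       y > int(img_height) - margin;
	};
	feats.erase( std::remove_if(feats.begin(), feats.end(), is_bad), feats.end() );
	return N - feats.size();
}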
Code example #5
void CFeatureExtraction::detectFeatures_SSE2_FASTER12(const CImage &img, TSimpleFeatureList & corners, const int threshold, bool append_to_list, uint8_t octave,std::vector<size_t> * out_feats_index_by_row)
{
#if MRPT_HAS_OPENCV
	const IplImage *IPL = img.getAs<IplImage>();
	ASSERTDEB_(IPL && IPL->nChannels==1)
	if (!append_to_list) corners.clear();

	fast_corner_detect_12 (IPL,corners,threshold,octave,out_feats_index_by_row);
#else
	THROW_EXCEPTION("MRPT built without OpenCV support!")
#endif
}
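
A minimal usage sketch for this wrapper; the threshold and octave values are illustrative, and the gray-scale conversion reuses the FAST_REF_OR_CONVERT_TO_GRAY constructor flag that appears in code example #6:

// Hedged sketch: detect FASTER-12 corners on a single image.
CFeatureExtraction fe;
CImage img;
img.loadFromFile( "frame.png" );
const CImage img_gray( img, FAST_REF_OR_CONVERT_TO_GRAY );   // the wrapper asserts a 1-channel image

TSimpleFeatureList corners;
fe.detectFeatures_SSE2_FASTER12( img_gray, corners, 20 /*threshold*/, false /*append_to_list*/, 0 /*octave*/, NULL );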
Code example #6
// N_fast = 9, 10, 12
void  CFeatureExtraction::extractFeaturesFASTER_N(
	const int					N_fast,
	const mrpt::utils::CImage	& inImg,
	CFeatureList			    & feats,
	unsigned int			    init_ID,
	unsigned int			    nDesiredFeatures,
	const TImageROI			    & ROI )  const
{
	MRPT_START

#if MRPT_HAS_OPENCV
	// Make sure we operate on a gray-scale version of the image:
	const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );

	const IplImage *IPL = inImg_gray.getAs<IplImage>();

	TSimpleFeatureList corners;
	TFeatureType type_of_this_feature;

	switch (N_fast)
	{
	case 9:  fast_corner_detect_9 (IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER9; break;
	case 10: fast_corner_detect_10(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER10; break;
	case 12: fast_corner_detect_12(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER12; break;
	default:
		THROW_EXCEPTION("Only the 9,10,12 FASTER detectors are implemented.")
		break;
	};

	// *All* the features have been extracted.
	const size_t N = corners.size();

	// Now:
	//  1) Sort them by "response": it's ~100 times faster to sort a list of
	//      indices "sorted_indices" than to sort the actual list of features "corners" directly.
	std::vector<size_t> sorted_indices(N);
	for (size_t i=0;i<N;i++)  sorted_indices[i]=i;

	// Use KLT response
	if (options.FASTOptions.use_KLT_response ||
		nDesiredFeatures!=0 // If the user wants us to limit the number of features, we need to do it according to some quality measure
		)
	{
		const int KLT_half_win = 4;
		const int max_x = inImg_gray.getWidth() - 1 - KLT_half_win;
		const int max_y = inImg_gray.getHeight() - 1 - KLT_half_win;

		for (size_t i=0;i<N;i++)
		{
			const int x = corners[i].pt.x;
			const int y = corners[i].pt.y;
			if (x>KLT_half_win && y>KLT_half_win && x<=max_x && y<=max_y)
					corners[i].response = inImg_gray.KLT_response(x,y,KLT_half_win);
			else	corners[i].response = -100;
		}

		std::sort( sorted_indices.begin(), sorted_indices.end(), KeypointResponseSorter<TSimpleFeatureList>(corners) );
	}
	else
	{
		for (size_t i=0;i<N;i++)
			corners[i].response = 0;
	}

	//  2) Filter by "min-distance" (in options.FASTOptions.min_distance)
	//  3) Convert to MRPT CFeatureList format.
	// Steps 2 & 3 are done together in the while() below.
	// The "min-distance" filter is done by means of a 2D binary matrix where each cell is marked when one
	// feature falls within it. This is not exactly the same as a pure "min-distance" filter, but it is pretty close,
	// and for large numbers of features it is much faster than a brute-force search or kd-tree queries.
	// (An intermediate approach would be the creation of a mask image updated for each accepted feature, etc.)

	const bool do_filter_min_dist = options.FASTOptions.min_distance>1;

	// Use half the min-distance, since we'll later mark the cells [i-1,i+1] as occupied for a feature falling in cell "i"
	const unsigned int occupied_grid_cell_size = options.FASTOptions.min_distance/2.0;
	const float occupied_grid_cell_size_inv = 1.0f/occupied_grid_cell_size;

	unsigned int grid_lx = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getWidth() * occupied_grid_cell_size_inv);
	unsigned int grid_ly = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getHeight() * occupied_grid_cell_size_inv );

	mrpt::math::CMatrixBool  occupied_sections(grid_lx,grid_ly);  // See the comments above for an explanation.
	occupied_sections.fillAll(false);


	unsigned int	nMax		= (nDesiredFeatures!=0 && N > nDesiredFeatures) ? nDesiredFeatures : N;
	const int 		offset		= (int)this->options.patchSize/2 + 1;
	const int		size_2		= options.patchSize/2;
	const size_t 	imgH		= inImg.getHeight();
	const size_t 	imgW		= inImg.getWidth();
	unsigned int	i			= 0;
	unsigned int	cont		= 0;
	TFeatureID		nextID		= init_ID;

	if( !options.addNewFeatures )
		feats.clear();


	while( cont != nMax && i!=N )
	{
		// Take the next feature from the ordered list of good features:
		const TSimpleFeature &feat = corners[ sorted_indices[i] ];
		i++;

		// Patch out of the image??
		const int xBorderInf =  feat.pt.x - size_2;
		const int xBorderSup =  feat.pt.x + size_2;
		const int yBorderInf =  feat.pt.y - size_2;
		const int yBorderSup =  feat.pt.y + size_2;

		if (!( xBorderSup < (int)imgW && xBorderInf > 0 && yBorderSup < (int)imgH && yBorderInf > 0 ))
			continue; // nope, skip.

		if (do_filter_min_dist)
		{
			// Check the min-distance:
			const size_t section_idx_x = size_t(feat.pt.x * occupied_grid_cell_size_inv);
			const size_t section_idx_y = size_t(feat.pt.y * occupied_grid_cell_size_inv);

			if (occupied_sections(section_idx_x,section_idx_y))
				continue; // Already occupied! skip.

			// Mark section as occupied
			occupied_sections.set_unsafe(section_idx_x,section_idx_y, true);
			if (section_idx_x>0)	occupied_sections.set_unsafe(section_idx_x-1,section_idx_y, true);
			if (section_idx_y>0)	occupied_sections.set_unsafe(section_idx_x,section_idx_y-1, true);
			if (section_idx_x<grid_lx-1)	occupied_sections.set_unsafe(section_idx_x+1,section_idx_y, true);
			if (section_idx_y<grid_ly-1)	occupied_sections.set_unsafe(section_idx_x,section_idx_y+1, true);
		}

		// All tests passed: add new feature:
		CFeaturePtr ft		= CFeature::Create();
		ft->type			= type_of_this_feature;
		ft->ID				= nextID++;
		ft->x				= feat.pt.x;
		ft->y				= feat.pt.y;
		ft->response		= feat.response;
		ft->orientation		= 0;
		ft->scale			= 1;
		ft->patchSize		= options.patchSize;		// The size of the feature patch

		if( options.patchSize > 0 )
		{
			inImg.extract_patch(
				ft->patch,
				round( ft->x ) - offset,
				round( ft->y ) - offset,
				options.patchSize,
				options.patchSize );						// Image patch surrounding the feature
		}
		feats.push_back( ft );
		++cont;
	}

#endif
	MRPT_END
}
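
The grid-based "min distance" filter embedded in the function above can also be isolated. The following is only a sketch of the same idea, with hypothetical names and a plain std::vector<bool> standing in for mrpt::math::CMatrixBool:

#include <cstddef>
#include <vector>

// Hedged sketch: each accepted feature marks its own grid cell plus the 4 neighbours
// as occupied, so any later candidate falling in an occupied cell is rejected.
// The cell size is roughly half the desired minimum distance, as in the code above.
struct MinDistGrid
{
	MinDistGrid( size_t img_w, size_t img_h, float min_dist )
		: cell( min_dist * 0.5f > 1.0f ? min_dist * 0.5f : 1.0f ),
		  lx( 1 + size_t(img_w / cell) ), ly( 1 + size_t(img_h / cell) ),
		  occ( lx * ly, false )
	{}

	// Returns true (and marks the neighbourhood) if (x,y) is far enough from all
	// previously accepted points; returns false if its cell is already occupied.
	bool try_accept( int x, int y )
	{
		const size_t cx = size_t(x / cell), cy = size_t(y / cell);
		if (occ[cy * lx + cx]) return false;
		mark(cx, cy);
		if (cx > 0)      mark(cx - 1, cy);
		if (cy > 0)      mark(cx, cy - 1);
		if (cx < lx - 1) mark(cx + 1, cy);
		if (cy < ly - 1) mark(cx, cy + 1);
		return true;
	}

private:
	void mark( size_t cx, size_t cy ) { occ[cy * lx + cx] = true; }

	float  cell;
	size_t lx, ly;
	std::vector<bool> occ;
};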
Code example #7
File: tracking.cpp Project: gamman/MRPT
			template <>
			inline void trackFeatures_addNewFeats<TSimpleFeatureList>(TSimpleFeatureList &featureList,const TSimpleFeatureList &new_feats, const std::vector<size_t> &sorted_indices, const size_t nNewToCheck,const size_t maxNumFeatures,const float minimum_KLT_response_to_add,const double threshold_sqr_dist_to_add_new,const size_t patchSize,const CImage &cur_gray, TFeatureID  &max_feat_ID_at_input)
			{
#if 0
				// Brute-force version:
				const int max_manhatan_dist = std::sqrt(2*threshold_sqr_dist_to_add_new);

				for (size_t i=0;i<nNewToCheck && featureList.size()<maxNumFeatures;i++)
				{
					const TSimpleFeature &feat = new_feats[ sorted_indices[i] ];
					if (feat.response<minimum_KLT_response_to_add) break; // continue;

					// Check the min-distance:
					int manh_dist = std::numeric_limits<int>::max();
					for (size_t j=0;j<featureList.size();j++)
					{
						const TSimpleFeature &existing = featureList[j];
						const int d = std::abs(existing.pt.x-feat.pt.x)+std::abs(existing.pt.y-feat.pt.y);
						mrpt::utils::keep_min(manh_dist, d);
					}

					if (manh_dist<max_manhatan_dist)
						continue; // Already occupied! skip.

					// OK: accept it
					featureList.push_back_fast(feat.pt.x,feat.pt.y);  // (x,y)
					//featureList.mark_kdtree_as_outdated();

					// Fill out the rest of data:
					TSimpleFeature &newFeat = featureList.back();

					newFeat.ID			= ++max_feat_ID_at_input;
					newFeat.response	= feat.response;
					newFeat.octave		= 0;
					newFeat.track_status = status_IDLE;  //!< Inactive: right after detection, and before any attempt to track it
				}
#elif 0
				// Version with an occupancy grid:
				const int grid_cell_log2 = round( std::log(std::sqrt(threshold_sqr_dist_to_add_new)*0.5)/std::log(2.0));

				int grid_lx = 1+(cur_gray.getWidth() >> grid_cell_log2);
				int grid_ly = 1+(cur_gray.getHeight()>> grid_cell_log2);

				mrpt::math::CMatrixBool & occupied_sections = featureList.getOccupiedSectionsMatrix();

				occupied_sections.setSize(grid_lx,grid_ly);  // See the comments above for an explanation.
				occupied_sections.fillAll(false);

				for (size_t i=0;i<featureList.size();i++)
				{
					const TSimpleFeature &feat = featureList[i];
					const int section_idx_x = feat.pt.x >> grid_cell_log2;
					const int section_idx_y = feat.pt.y >> grid_cell_log2;

					if (!section_idx_x || !section_idx_y || section_idx_x>=grid_lx-1 || section_idx_y>=grid_ly-1)
						continue; // This may be too radical, but speeds up the logic below...
					
					// Mark sections as occupied
					bool *ptr1 = &occupied_sections.get_unsafe(section_idx_x-1,section_idx_y-1);
					bool *ptr2 = &occupied_sections.get_unsafe(section_idx_x-1,section_idx_y  );
					bool *ptr3 = &occupied_sections.get_unsafe(section_idx_x-1,section_idx_y+1);
					ptr1[0]=ptr1[1]=ptr1[2]=true;
					ptr2[0]=ptr2[1]=ptr2[2]=true;
					ptr3[0]=ptr3[1]=ptr3[2]=true;
				}

				for (size_t i=0;i<nNewToCheck && featureList.size()<maxNumFeatures;i++)
				{
					const TSimpleFeature &feat = new_feats[ sorted_indices[i] ];
					if (feat.response<minimum_KLT_response_to_add) break; // continue;

					// Check the min-distance:
					const int section_idx_x = feat.pt.x >> grid_cell_log2;
					const int section_idx_y = feat.pt.y >> grid_cell_log2;

					if (!section_idx_x || !section_idx_y || section_idx_x>=grid_lx-2 || section_idx_y>=grid_ly-2)
						continue; // This may be too radical, but speeds up the logic below...

					if (occupied_sections(section_idx_x,section_idx_y))
						continue; // Already occupied! skip.

					// Mark section as occupied
					bool *ptr1 = &occupied_sections.get_unsafe(section_idx_x-1,section_idx_y-1);
					bool *ptr2 = &occupied_sections.get_unsafe(section_idx_x-1,section_idx_y  );
					bool *ptr3 = &occupied_sections.get_unsafe(section_idx_x-1,section_idx_y+1);

					ptr1[0]=ptr1[1]=ptr1[2]=true;
					ptr2[0]=ptr2[1]=ptr2[2]=true;
					ptr3[0]=ptr3[1]=ptr3[2]=true;

					// OK: accept it
					featureList.push_back_fast(feat.pt.x,feat.pt.y);  // (x,y)
					//featureList.mark_kdtree_as_outdated();

					// Fill out the rest of data:
					TSimpleFeature &newFeat = featureList.back();

					newFeat.ID			= ++max_feat_ID_at_input;
					newFeat.response	= feat.response;
					newFeat.octave		= 0;
					newFeat.track_status = status_IDLE;  //!< Inactive: right after detection, and before any attempt to track it
				}
#else
				// Version with KD-tree
				CFeatureListKDTree<TSimpleFeature>  kdtree(featureList.getVector());


				for (size_t i=0;i<nNewToCheck && featureList.size()<maxNumFeatures;i++)
				{
					const TSimpleFeature &feat = new_feats[ sorted_indices[i] ];
					if (feat.response<minimum_KLT_response_to_add) break; // continue;

					// Check the min-distance:
					double min_dist_sqr = std::numeric_limits<double>::max();

					if (!featureList.empty())
					{
						//m_timlog.enter("[CGenericFeatureTracker] add new features.kdtree");
						min_dist_sqr = kdtree.kdTreeClosestPoint2DsqrError(feat.pt.x,feat.pt.y );
						//m_timlog.leave("[CGenericFeatureTracker] add new features.kdtree");
					}

					if (min_dist_sqr>threshold_sqr_dist_to_add_new)
					{
						// OK: accept it
						featureList.push_back_fast(feat.pt.x,feat.pt.y);  // (x,y)
						kdtree.mark_as_outdated();

						// Fill out the rest of data:
						TSimpleFeature &newFeat = featureList.back();

						newFeat.ID			= ++max_feat_ID_at_input;
						newFeat.response	= feat.response;
						newFeat.octave		= 0;
						newFeat.track_status = status_IDLE;  //!< Inactive: right after detection, and before any attempt to track it
					}
				}

#endif
			} // end of trackFeatures_addNewFeats<>
Code example #8
// ------------------------------------------------------
//		DoTrackingDemo
// ------------------------------------------------------
int DoTrackingDemo(CCameraSensorPtr  cam, bool  DO_SAVE_VIDEO)
{
	win = mrpt::gui::CDisplayWindow3D::Create("Tracked features",800,600);

	mrpt::vision::CVideoFileWriter  vidWritter;

	bool 		hasResolution = false;
	TCamera		cameraParams; // For now, this will only hold the image resolution, filled in when the first frame arrives.

	TSimpleFeatureList  trackedFeats;

	unsigned int	step_num = 0;

	bool  SHOW_FEAT_IDS = true;
	bool  SHOW_RESPONSES = true;
	bool  SHOW_FEAT_TRACKS = true;


	const double SAVE_VIDEO_FPS = 30; // If DO_SAVE_VIDEO=true, the FPS of the video file
	const char*  SAVE_VIDEO_CODEC = "XVID"; // "XVID", "PIM1", "MJPG"

	bool  DO_HIST_EQUALIZE_IN_GRAYSCALE = false;
	string VIDEO_OUTPUT_FILE = "./tracking_video.avi";

	const double MAX_FPS = 5000; // 5.0;  // Hz (to slow down visualization).

	CGenericFeatureTrackerAutoPtr  tracker;

	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr( new CFeatureTracker_KL );

	tracker->enableTimeLogger(true); // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// -------------------------------------------------------------
	// To see all the existing params and documentation, see mrpt::vision::CGenericFeatureTracker
	tracker->extra_params["remove_lost_features"]         = 1;   // automatically remove out-of-image and badly tracked features

	tracker->extra_params["add_new_features"]             = 1;   // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"]  = 32;
	tracker->extra_params["minimum_KLT_response_to_add"]  = 10;
	tracker->extra_params["add_new_feat_max_features"]    = 350;
	tracker->extra_params["add_new_feat_patch_size"]      = 11;

	tracker->extra_params["update_patches_every"]		= 0;  // Don't update patches.

	tracker->extra_params["check_KLT_response_every"]	= 5;	// Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"]	    = 5;

	// Specific params for "CFeatureTracker_KL"
	// ------------------------------------------------------
	tracker->extra_params["window_width"]  = 5;
	tracker->extra_params["window_height"] = 5;
	//tracker->extra_params["LK_levels"] = 3;
	//tracker->extra_params["LK_max_iters"] = 10;
	//tracker->extra_params["LK_epsilon"] = 0.1;
	//tracker->extra_params["LK_max_tracking_error"] = 150;


	// --------------------------------
	// The main loop
	// --------------------------------
	CImage		previous_image;

	TSequenceFeatureObservations    feat_track_history;
	bool							save_tracked_history = true; // Dump feat_track_history to a file at the end

	TCameraPoseID 					curCamPoseId = 0;

	cout << endl << "TO END THE PROGRAM: Close the window.\n";

	mrpt::opengl::COpenGLViewportPtr gl_view;
	{
		mrpt::opengl::COpenGLScenePtr scene = win->get3DSceneAndLock();
		gl_view = scene->getViewport("main");
		win->unlockAccess3DScene();
	}

	// Aux data for drawing the recent track of features:
	static const size_t FEATS_TRACK_LEN = 10;
	std::map<TFeatureID,std::list<TPixelCoord> >  feat_tracks;

	// infinite loop, until we close the win:
	while( win->isOpen() )
	{
		CObservationPtr obs;
		try
		{
			obs= cam->getNextFrame();
		}
		catch (CExceptionEOF &)
		{	// End of a rawlog file.
			break;
		}

		if (!obs)
		{
			cerr << "*Warning* getNextFrame() returned NULL!\n";
			mrpt::system::sleep(50);
			continue;
		}

		CImage theImg;  // The grabbed image:

		if (IS_CLASS(obs,CObservationImage))
		{
			CObservationImagePtr o = CObservationImagePtr(obs);
			theImg.copyFastFrom(o->image);
		}
		else if (IS_CLASS(obs,CObservationStereoImages))
		{
			CObservationStereoImagesPtr o = CObservationStereoImagesPtr(obs);
			theImg.copyFastFrom(o->imageLeft);
		}
		else if (IS_CLASS(obs,CObservation3DRangeScan))
		{
			CObservation3DRangeScanPtr o = CObservation3DRangeScanPtr(obs);
			if (o->hasIntensityImage)
				theImg.copyFastFrom(o->intensityImage);
		}
		else
		{
			continue; // Silently ignore non-image observations.
		}

		// Make sure the image is loaded (in case it came from a rawlog file)
		if (theImg.isExternallyStored())
			theImg.loadFromFile( theImg.getExternalStorageFileAbsolutePath());

		// Take the resolution upon first valid frame.
		if (!hasResolution)
		{
			hasResolution = true;
			// cameraParams.scaleToResolution()...
			cameraParams.ncols = theImg.getWidth();
			cameraParams.nrows = theImg.getHeight();
		}

		// Do tracking:
		if (step_num>1)  // we need "previous_image" to be valid.
		{
			// This single call makes: detection, tracking, recalculation of KLT_response, etc.
			tracker->trackFeatures(previous_image, theImg, trackedFeats);
		}

		// Save the image for the next step:
		previous_image = theImg;

		// Save history of feature observations:
		tracker->getProfiler().enter("Save history");

		for (size_t i=0;i<trackedFeats.size();++i)
		{
			TSimpleFeature &f = trackedFeats[i];

			const TPixelCoordf pxRaw(f.pt.x,f.pt.y);
			TPixelCoordf  pxUndist;
			//mrpt::vision::pinhole::undistort_point(pxRaw,pxUndist, cameraParams);
			pxUndist = pxRaw;

			feat_track_history.push_back( TFeatureObservation(f.ID,curCamPoseId, pxUndist ) );
		}
		curCamPoseId++;

		tracker->getProfiler().leave("Save history");

		// now that we're done with the image, we can directly write onto it
		//  for the display
		// ----------------------------------------------------------------
		if (DO_HIST_EQUALIZE_IN_GRAYSCALE && !theImg.isColor())
			theImg.equalizeHistInPlace();
		// Convert to color so we can draw color marks, etc.
		theImg.colorImageInPlace();

		double extra_tim_to_wait=0;

		{	// FPS:
			static CTicTac tictac;
			const double T = tictac.Tac();
			tictac.Tic();
			const double fps = 1.0/(std::max(1e-5,T));
			//theImg.filledRectangle(1,1,175,25,TColor(0,0,0));

			const int current_adapt_thres = tracker->getDetectorAdaptiveThreshold();

			theImg.selectTextFont("6x13B");
			theImg.textOut(3,3,format("FPS: %.03f Hz", fps ),TColor(200,200,0) );
			theImg.textOut(3,22,format("# feats: %u - Adaptive threshold: %i", (unsigned int)trackedFeats.size(), current_adapt_thres ),TColor(200,200,0) );

			theImg.textOut(3,41,
				format("# raw feats: %u - Removed: %u",
					(unsigned int)tracker->last_execution_extra_info.raw_FAST_feats_detected,
					(unsigned int)tracker->last_execution_extra_info.num_deleted_feats ),
					TColor(200,200,0) );

			extra_tim_to_wait = 1.0/MAX_FPS - 1.0/fps;
		}

		// Draw feature tracks
		if (SHOW_FEAT_TRACKS)
		{
			// Update new feature coords:
			tracker->getProfiler().enter("drawFeatureTracks");

			std::set<TFeatureID> observed_IDs;

			for (size_t i=0;i<trackedFeats.size();++i)
			{
				const TSimpleFeature &ft = trackedFeats[i];
				std::list<TPixelCoord> & seq = feat_tracks[ft.ID];

				observed_IDs.insert(ft.ID);

				if (seq.size()>=FEATS_TRACK_LEN) seq.erase(seq.begin());
				seq.push_back(ft.pt);

				// Draw:
				if (seq.size()>1)
				{
					const std::list<TPixelCoord>::const_iterator it_end = seq.end();

					std::list<TPixelCoord>::const_iterator it      = seq.begin();
					std::list<TPixelCoord>::const_iterator it_prev = it++;

					for (;it!=it_end;++it)
					{
						theImg.line(it_prev->x,it_prev->y,it->x,it->y, TColor(190,190,190) );
						it_prev = it;
					}
				}
			}

			tracker->getProfiler().leave("drawFeatureTracks");

			// Purge old data:
			for (std::map<TFeatureID,std::list<TPixelCoord> >::iterator it=feat_tracks.begin();it!=feat_tracks.end(); )
			{
				if (observed_IDs.find(it->first)==observed_IDs.end())
				{
					std::map<TFeatureID,std::list<TPixelCoord> >::iterator next_it = it;
					next_it++;
					feat_tracks.erase(it);
					it = next_it;
				}
				else ++it;
			}
		}

		// Draw Tracked feats:
		{
			theImg.selectTextFont("5x7");
			tracker->getProfiler().enter("drawFeatures");
			theImg.drawFeatures(trackedFeats, TColor(0,0,255), SHOW_FEAT_IDS, SHOW_RESPONSES);
			tracker->getProfiler().leave("drawFeatures");
		}


		// Update window:
		win->get3DSceneAndLock();
			gl_view->setImageView(theImg);
		win->unlockAccess3DScene();
		win->repaint();

		// Save debug output video:
		// ----------------------------------
		if (DO_SAVE_VIDEO)
		{
			static bool first = true;
			if (first)
			{
				first=false;
				if (vidWritter.open(
						VIDEO_OUTPUT_FILE,
						SAVE_VIDEO_FPS /* fps */, theImg.getSize(),
						SAVE_VIDEO_CODEC,
						true /* force color video */ ) )
				{
					cout << "[track-video] Saving tracking video to: " << VIDEO_OUTPUT_FILE << endl;
				}
				else
					cerr << "ERROR: Trying to create output video: " << VIDEO_OUTPUT_FILE << endl;
			}

			vidWritter << theImg;
		}

		if (extra_tim_to_wait>0)
			mrpt::system::sleep(1000*extra_tim_to_wait);

		step_num++;
	} // end infinite loop

	// Save tracked feats:
	if (save_tracked_history)
	{
		cout << "Saving tracked features to: tracked_feats.txt..."; cout.flush();
		feat_track_history.saveToTextFile("./tracked_feats.txt");
		cout << "Done!\n"; cout.flush();

#if 0
		// SBA:
		cout << "Saving cams.txt & pts.txt files in SBA library format..."; cout.flush();

		feat_track_history.removeFewObservedFeatures(3);
		feat_track_history.decimateCameraFrames(20);
		feat_track_history.compressIDs();

		TLandmarkLocationsVec  locs;
		TFramePosesVec         cams;
		feat_track_history.saveAsSBAFiles(locs,"pts.txt", cams, "cams.txt");


		cout << "Done!\n"; cout.flush();
#endif
	}


	return 0; // End ok.
}