/** Timer callback: recompute the local obstacle map and publish it */
	void onDoPublish(const ros::TimerEvent& )
	{
		mrpt::utils::CTimeLoggerEntry tle(m_profiler,"onDoPublish");

		// Purge old observations & latch a local copy:
		TListObservations obs;
		{
			mrpt::utils::CTimeLoggerEntry tle(m_profiler,"onDoPublish.removingOld");
			m_hist_obs_mtx.lock();

			// Purge obs older than the sliding time window (m_hist_obs is keyed by timestamp):
			if (!m_hist_obs.empty())
			{
				const double last_time = m_hist_obs.rbegin()->first;
				TListObservations::iterator it_first_valid = m_hist_obs.lower_bound( last_time-m_time_window );
				const size_t nToRemove = std::distance( m_hist_obs.begin(), it_first_valid );
				ROS_DEBUG("[onDoPublish] Removing %u old entries, last_time=%lf",static_cast<unsigned int>(nToRemove), last_time);
				m_hist_obs.erase(m_hist_obs.begin(), it_first_valid);
			}
			// Local copy in this thread:
			obs = m_hist_obs;
			m_hist_obs_mtx.unlock();
		}

		ROS_DEBUG("Building local map with %u observations.",static_cast<unsigned int>(obs.size()));
		if (obs.empty())
			return;

		// Build local map(s):
		// -----------------------------------------------
		m_localmap_pts.clear();
		mrpt::poses::CPose3D curRobotPose;
		{
			mrpt::utils::CTimeLoggerEntry tle2(m_profiler,"onDoPublish.buildLocalMap");

			// Get the latest robot pose in the reference frame (typ: /odom -> /base_link)
			// so we can build the local map RELATIVE to it:
			try {
				tf::StampedTransform tx;
				m_tf_listener.lookupTransform(m_frameid_reference,m_frameid_robot, ros::Time(0), tx);
				mrpt_bridge::convert(tx,curRobotPose);
				ROS_DEBUG("[onDoPublish] Building local map relative to latest robot pose: %s", curRobotPose.asString().c_str() );
			}
			catch (tf::TransformException &ex) {
				ROS_ERROR("%s",ex.what());
				return;
			}

			// For each observation: compute relative robot pose & insert obs into map:
			for (TListObservations::const_iterator it=obs.begin();it!=obs.end();++it)
			{
				const TInfoPerTimeStep & ipt = it->second;

				// Relative pose in the past:
				mrpt::poses::CPose3D relPose(mrpt::poses::UNINITIALIZED_POSE);
				relPose.inverseComposeFrom( ipt.robot_pose, curRobotPose );

				// Insert obs:
				m_localmap_pts.insertObservationPtr(ipt.observation,&relPose);

			} // end for

		}

		// Publish them:
		if (m_pub_local_map_pointcloud.getNumSubscribers()>0)
		{
			sensor_msgs::PointCloudPtr msg_pts = sensor_msgs::PointCloudPtr( new sensor_msgs::PointCloud);
			msg_pts->header.frame_id = m_frameid_robot;
			msg_pts->header.stamp = ros::Time( obs.rbegin()->first );
			mrpt_bridge::point_cloud::mrpt2ros(m_localmap_pts,msg_pts->header, *msg_pts);
			m_pub_local_map_pointcloud.publish(msg_pts);
		}

		// Show gui:
		if (m_show_gui)
		{
			if (!m_gui_win)
			{
				m_gui_win = mrpt::gui::CDisplayWindow3D::Create("LocalObstaclesNode",800,600);
				mrpt::opengl::COpenGLScenePtr &scene = m_gui_win->get3DSceneAndLock();
				scene->insert( mrpt::opengl::CGridPlaneXY::Create() );
				scene->insert( mrpt::opengl::stock_objects::CornerXYZSimple(1.0,4.0) );

				mrpt::opengl::CSetOfObjectsPtr gl_obs = mrpt::opengl::CSetOfObjects::Create();
				gl_obs->setName("obstacles");
				scene->insert( gl_obs );

				mrpt::opengl::CPointCloudPtr gl_pts = mrpt::opengl::CPointCloud::Create();
				gl_pts->setName("points");
				gl_pts->setPointSize(2.0);
				gl_pts->setColor_u8( mrpt::utils::TColor(0x0000ff) );
				scene->insert( gl_pts );

				m_gui_win->unlockAccess3DScene();
			}

			mrpt::opengl::COpenGLScenePtr &scene = m_gui_win->get3DSceneAndLock();
			mrpt::opengl::CSetOfObjectsPtr gl_obs = mrpt::opengl::CSetOfObjectsPtr( scene->getByName("obstacles") );
			ROS_ASSERT(gl_obs.present());
			gl_obs->clear();

			mrpt::opengl::CPointCloudPtr gl_pts = mrpt::opengl::CPointCloudPtr( scene->getByName("points") );

			for (TListObservations::const_iterator it=obs.begin();it!=obs.end();++it)
			{
				const TInfoPerTimeStep & ipt = it->second;
				// Relative pose in the past:
				mrpt::poses::CPose3D relPose(mrpt::poses::UNINITIALIZED_POSE);
				relPose.inverseComposeFrom( ipt.robot_pose, curRobotPose );

				mrpt::opengl::CSetOfObjectsPtr gl_axis = mrpt::opengl::stock_objects::CornerXYZSimple(0.9,2.0);
				gl_axis->setPose(relPose);
				gl_obs->insert(gl_axis);
			} // end for

			gl_pts->loadFromPointsMap(&m_localmap_pts);


			m_gui_win->unlockAccess3DScene();
			m_gui_win->repaint();
		}


	} // onDoPublish
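
// ------------------------------------------------------
// Side note (not part of the node above): a minimal standalone sketch of the
// relative-pose operation used in onDoPublish(). inverseComposeFrom(A, B)
// sets *this = B^(-1) (+) A, i.e. it expresses pose A in the frame of pose B,
// which is how past observations get re-referenced to the latest robot pose.
// The pose values are made up for illustration; assumes MRPT 1.x (mrpt-base).
// Compile separately from the node code.
// ------------------------------------------------------
#include <mrpt/poses/CPose3D.h>
#include <iostream>

int main()
{
	using mrpt::poses::CPose3D;

	// Robot pose at the time of an old observation, and the latest robot pose:
	const CPose3D past_robot_pose(1.0, 2.0, 0.0 /*x,y,z*/, 0.0, 0.0, 0.0 /*yaw,pitch,roll*/);
	const CPose3D cur_robot_pose (2.0, 2.0, 0.0, 1.5708 /* ~90 deg yaw */, 0.0, 0.0);

	// Same operation as in onDoPublish(): the old pose, expressed relative
	// to the current robot frame.
	CPose3D rel_pose;
	rel_pose.inverseComposeFrom(past_robot_pose, cur_robot_pose);

	std::cout << "Relative pose: " << rel_pose.asString() << std::endl;
	return 0;
}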
// ------------------------------------------------------
//				TestRANSACPlanes
// ------------------------------------------------------
void TestRANSACPlanes()
{
	randomGenerator.randomize();

	// Generate random points:
	// ------------------------------------
	const size_t N_PLANES = 3;

	const size_t N_plane = 300;
	const size_t N_noise = 300;

	const double PLANE_EQ[N_PLANES][4]={ 
		{ 1,-1,1, -2 },
		{ 1,+1.5, 1, -1 },
		{ 0,-1,1, +2 } };

	CVectorDouble xs,ys,zs;
	for (size_t p=0;p<N_PLANES;p++)
	{
		for (size_t i=0;i<N_plane;i++)
		{
			const double xx = randomGenerator.drawUniform(-3,3) + 5*cos(0.4*p);
			const double yy = randomGenerator.drawUniform(-3,3) + 5*sin(0.4*p);
			const double zz = -(PLANE_EQ[p][3]+PLANE_EQ[p][0]*xx+PLANE_EQ[p][1]*yy)/PLANE_EQ[p][2];
			xs.push_back(xx);
			ys.push_back(yy);
			zs.push_back(zz);
		}
	}

	for (size_t i=0;i<N_noise;i++)
	{
		xs.push_back( randomGenerator.drawUniform(-7,7));
		ys.push_back( randomGenerator.drawUniform(-7,7));
		zs.push_back( randomGenerator.drawUniform(-7,7));
	}


	// Run RANSAC
	// ------------------------------------
	vector<pair<size_t,TPlane> >   detectedPlanes;
	const double DIST_THRESHOLD = 0.05;

	CTicTac	tictac;
	tictac.Tic(); // Start timing right before the call we want to measure.

	ransac_detect_3D_planes(xs,ys,zs,detectedPlanes,DIST_THRESHOLD,40 );

	// Display output:
	cout << "RANSAC method: ransac_detect_3D_planes" << endl;
	cout << " Computation time: " << tictac.Tac()*1000.0 << " ms" << endl;
	cout << " " << detectedPlanes.size() << " planes detected." << endl;


	// Show GUI
	// --------------------------
	win = mrpt::gui::CDisplayWindow3DPtr( new mrpt::gui::CDisplayWindow3D("RANSAC: 3D planes", 500,500));

	opengl::COpenGLScenePtr scene = opengl::COpenGLScene::Create();

	scene->insert( opengl::CGridPlaneXY::Create(-20,20,-20,20,0,1) );
	scene->insert( opengl::stock_objects::CornerXYZ() );

	for (vector<pair<size_t,TPlane> >::iterator p=detectedPlanes.begin();p!=detectedPlanes.end();++p)
	{
		opengl::CTexturedPlanePtr glPlane = opengl::CTexturedPlane::Create(-10,10,-10,10);

		CPose3D   glPlanePose;
		p->second.getAsPose3D( glPlanePose );
		glPlane->setPose(glPlanePose);

		glPlane->setColor( randomGenerator.drawUniform(0,1), randomGenerator.drawUniform(0,1),randomGenerator.drawUniform(0,1), 0.6);

		scene->insert( glPlane );
	}

	{
		opengl::CPointCloudPtr  points = opengl::CPointCloud::Create();
		points->setColor(0,0,1);
		points->setPointSize(3);
		points->enableColorFromZ();

		// Convert double -> float:
		vector<float> xsf,ysf,zsf;
		metaprogramming::copy_container_typecasting(xs,xsf);
		metaprogramming::copy_container_typecasting(ys,ysf);
		metaprogramming::copy_container_typecasting(zs,zsf);

		points->setAllPoints(xsf,ysf,zsf);

		scene->insert( points );
	}

	win->get3DSceneAndLock() = scene;
	win->unlockAccess3DScene();
	win->forceRepaint();


	win->waitForKey();
}
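
// ------------------------------------------------------
// Side note (not part of TestRANSACPlanes above): a minimal standalone sketch
// of how DIST_THRESHOLD is interpreted by the plane-RANSAC step: a point counts
// as an inlier of a candidate plane when its point-to-plane distance is below
// the threshold. Values are illustrative only; assumes MRPT 1.x (mrpt-base).
// Compile separately from the example above.
// ------------------------------------------------------
#include <mrpt/math/lightweight_geom_data.h>
#include <iostream>

int main()
{
	using namespace mrpt::math;

	const TPlane   plane(1.0, -1.0, 1.0, -2.0);  // Same coefficients as PLANE_EQ[0].
	const TPoint3D pt(1.0, 0.0, 1.01);           // Nearly on the plane: 1 - 0 + 1.01 - 2 = 0.01
	const double   DIST_THRESHOLD = 0.05;        // Same value as in TestRANSACPlanes().

	const double d = plane.distance(pt);         // Unsigned point-to-plane distance.
	std::cout << "distance = " << d
	          << (d < DIST_THRESHOLD ? "  -> inlier" : "  -> outlier") << std::endl;
	return 0;
}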
// ------------------------------------------------------
//		DoTrackingDemo
// ------------------------------------------------------
int DoTrackingDemo(CCameraSensorPtr  cam, bool  DO_SAVE_VIDEO)
{
	win = mrpt::gui::CDisplayWindow3D::Create("Tracked features",800,600);

	mrpt::vision::CVideoFileWriter  vidWritter;

	bool 		hasResolution = false;
	TCamera		cameraParams; // For now, this only holds the image resolution, filled in upon arrival of the first frame.

	TSimpleFeatureList  trackedFeats;

	unsigned int	step_num = 0;

	bool  SHOW_FEAT_IDS = true;
	bool  SHOW_RESPONSES = true;
	bool  SHOW_FEAT_TRACKS = true;


	const double SAVE_VIDEO_FPS = 30; // If DO_SAVE_VIDEO=true, the FPS of the video file
	const char*  SAVE_VIDEO_CODEC = "XVID"; // "XVID", "PIM1", "MJPG"

	bool  DO_HIST_EQUALIZE_IN_GRAYSCALE = false;
	string VIDEO_OUTPUT_FILE = "./tracking_video.avi";

	const double MAX_FPS = 5000; // Hz. Lower this (e.g. to 5.0) to slow down the visualization.

	CGenericFeatureTrackerAutoPtr  tracker;

	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr( new CFeatureTracker_KL );

	tracker->enableTimeLogger(true); // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// -------------------------------------------------------------
	// To see all the existing params and documentation, see mrpt::vision::CGenericFeatureTracker
	tracker->extra_params["remove_lost_features"]         = 1;   // automatically remove out-of-image and badly tracked features

	tracker->extra_params["add_new_features"]             = 1;   // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"]  = 32;
	tracker->extra_params["minimum_KLT_response_to_add"]  = 10;
	tracker->extra_params["add_new_feat_max_features"]    = 350;
	tracker->extra_params["add_new_feat_patch_size"]      = 11;

	tracker->extra_params["update_patches_every"]		= 0;  // Don't update patches.

	tracker->extra_params["check_KLT_response_every"]	= 5;	// Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"]	    = 5;

	// Specific params for "CFeatureTracker_KL"
	// ------------------------------------------------------
	tracker->extra_params["window_width"]  = 5;
	tracker->extra_params["window_height"] = 5;
	//tracker->extra_params["LK_levels"] = 3;
	//tracker->extra_params["LK_max_iters"] = 10;
	//tracker->extra_params["LK_epsilon"] = 0.1;
	//tracker->extra_params["LK_max_tracking_error"] = 150;


	// --------------------------------
	// The main loop
	// --------------------------------
	CImage		previous_image;

	TSequenceFeatureObservations    feat_track_history;
	bool							save_tracked_history = true; // Dump feat_track_history to a file at the end

	TCameraPoseID 					curCamPoseId = 0;

	cout << endl << "TO END THE PROGRAM: Close the window.\n";

	mrpt::opengl::COpenGLViewportPtr gl_view;
	{
		mrpt::opengl::COpenGLScenePtr scene = win->get3DSceneAndLock();
		gl_view = scene->getViewport("main");
		win->unlockAccess3DScene();
	}

	// Aux data for drawing the recent track of features:
	static const size_t FEATS_TRACK_LEN = 10;
	std::map<TFeatureID,std::list<TPixelCoord> >  feat_tracks;

	// Main loop: run until the window is closed:
	while( win->isOpen() )
	{
		CObservationPtr obs;
		try
		{
			obs= cam->getNextFrame();
		}
		catch (CExceptionEOF &)
		{	// End of a rawlog file.
			break;
		}

		if (!obs)
		{
			cerr << "*Warning* getNextFrame() returned NULL!\n";
			mrpt::system::sleep(50);
			continue;
		}

		CImage theImg;  // The grabbed image:

		if (IS_CLASS(obs,CObservationImage))
		{
			CObservationImagePtr o = CObservationImagePtr(obs);
			theImg.copyFastFrom(o->image);
		}
		else if (IS_CLASS(obs,CObservationStereoImages))
		{
			CObservationStereoImagesPtr o = CObservationStereoImagesPtr(obs);
			theImg.copyFastFrom(o->imageLeft);
		}
		else if (IS_CLASS(obs,CObservation3DRangeScan))
		{
			CObservation3DRangeScanPtr o = CObservation3DRangeScanPtr(obs);
			if (o->hasIntensityImage)
				theImg.copyFastFrom(o->intensityImage);
		}
		else
		{
			continue; // Silently ignore non-image observations.
		}

		// Make sure the image is actually loaded in memory (it may be externally stored if it came from a rawlog file):
		if (theImg.isExternallyStored())
			theImg.loadFromFile( theImg.getExternalStorageFileAbsolutePath());

		// Record the image resolution from the first valid frame.
		if (!hasResolution)
		{
			hasResolution = true;
			// cameraParams.scaleToResolution()...
			cameraParams.ncols = theImg.getWidth();
			cameraParams.nrows = theImg.getHeight();
		}

		// Do tracking:
		if (step_num>1)  // we need "previous_image" to be valid.
		{
			// This single call performs detection, tracking, recalculation of the KLT response, etc.
			tracker->trackFeatures(previous_image, theImg, trackedFeats);
		}

		// Save the image for the next step:
		previous_image = theImg;

		// Save history of feature observations:
		tracker->getProfiler().enter("Save history");

		for (size_t i=0;i<trackedFeats.size();++i)
		{
			TSimpleFeature &f = trackedFeats[i];

			const TPixelCoordf pxRaw(f.pt.x,f.pt.y);
			TPixelCoordf  pxUndist;
			//mrpt::vision::pinhole::undistort_point(pxRaw,pxUndist, cameraParams);
			pxUndist = pxRaw;

			feat_track_history.push_back( TFeatureObservation(f.ID,curCamPoseId, pxUndist ) );
		}
		curCamPoseId++;

		tracker->getProfiler().leave("Save history");

		// now that we're done with the image, we can directly write onto it
		//  for the display
		// ----------------------------------------------------------------
		if (DO_HIST_EQUALIZE_IN_GRAYSCALE && !theImg.isColor())
			theImg.equalizeHistInPlace();
		// Convert to color so we can draw color marks, etc.
		theImg.colorImageInPlace();

		double extra_tim_to_wait=0;

		{	// FPS:
			static CTicTac tictac;
			const double T = tictac.Tac();
			tictac.Tic();
			const double fps = 1.0/(std::max(1e-5,T));
			//theImg.filledRectangle(1,1,175,25,TColor(0,0,0));

			const int current_adapt_thres = tracker->getDetectorAdaptiveThreshold();

			theImg.selectTextFont("6x13B");
			theImg.textOut(3,3,format("FPS: %.03f Hz", fps ),TColor(200,200,0) );
			theImg.textOut(3,22,format("# feats: %u - Adaptive threshold: %i", (unsigned int)trackedFeats.size(), current_adapt_thres ),TColor(200,200,0) );

			theImg.textOut(3,41,
				format("# raw feats: %u - Removed: %u",
					(unsigned int)tracker->last_execution_extra_info.raw_FAST_feats_detected,
					(unsigned int)tracker->last_execution_extra_info.num_deleted_feats ),
					TColor(200,200,0) );

			// Remaining time budget for this frame so the loop runs at (at most) MAX_FPS; may be negative if processing is already slower.
			extra_tim_to_wait = 1.0/MAX_FPS - 1.0/fps;
		}

		// Draw feature tracks
		if (SHOW_FEAT_TRACKS)
		{
			// Update new feature coords:
			tracker->getProfiler().enter("drawFeatureTracks");

			std::set<TFeatureID> observed_IDs;

			for (size_t i=0;i<trackedFeats.size();++i)
			{
				const TSimpleFeature &ft = trackedFeats[i];
				std::list<TPixelCoord> & seq = feat_tracks[ft.ID];

				observed_IDs.insert(ft.ID);

				// Keep at most FEATS_TRACK_LEN points per track, dropping the oldest first:
				if (seq.size()>=FEATS_TRACK_LEN) seq.erase(seq.begin());
				seq.push_back(ft.pt);

				// Draw:
				if (seq.size()>1)
				{
					const std::list<TPixelCoord>::const_iterator it_end = seq.end();

					std::list<TPixelCoord>::const_iterator it      = seq.begin();
					std::list<TPixelCoord>::const_iterator it_prev = it++;

					for (;it!=it_end;++it)
					{
						theImg.line(it_prev->x,it_prev->y,it->x,it->y, TColor(190,190,190) );
						it_prev = it;
					}
				}
			}

			tracker->getProfiler().leave("drawFeatureTracks");

			// Purge old data:
			for (std::map<TFeatureID,std::list<TPixelCoord> >::iterator it=feat_tracks.begin();it!=feat_tracks.end(); )
			{
				if (observed_IDs.find(it->first)==observed_IDs.end())
				{
					std::map<TFeatureID,std::list<TPixelCoord> >::iterator next_it = it;
					next_it++;
					feat_tracks.erase(it);
					it = next_it;
				}
				else ++it;
			}
		}

		// Draw Tracked feats:
		{
			theImg.selectTextFont("5x7");
			tracker->getProfiler().enter("drawFeatures");
			theImg.drawFeatures(trackedFeats, TColor(0,0,255), SHOW_FEAT_IDS, SHOW_RESPONSES);
			tracker->getProfiler().leave("drawFeatures");
		}


		// Update window:
		win->get3DSceneAndLock();
			gl_view->setImageView(theImg);
		win->unlockAccess3DScene();
		win->repaint();

		// Save debug output video:
		// ----------------------------------
		if (DO_SAVE_VIDEO)
		{
			static bool first = true;
			if (first)
			{
				first=false;
				if (vidWritter.open(
						VIDEO_OUTPUT_FILE,
						SAVE_VIDEO_FPS /* fps */, theImg.getSize(),
						SAVE_VIDEO_CODEC,
						true /* force color video */ ) )
				{
					cout << "[track-video] Saving tracking video to: " << VIDEO_OUTPUT_FILE << endl;
				}
				else
					cerr << "ERROR: Trying to create output video: " << VIDEO_OUTPUT_FILE << endl;
			}

			vidWritter << theImg;
		}

		if (extra_tim_to_wait>0)
			mrpt::system::sleep(1000*extra_tim_to_wait);

		step_num++;
	} // end infinite loop

	// Save tracked feats:
	if (save_tracked_history)
	{
		cout << "Saving tracked features to: tracked_feats.txt..."; cout.flush();
		feat_track_history.saveToTextFile("./tracked_feats.txt");
		cout << "Done!\n"; cout.flush();

#if 0
		// SBA:
		cout << "Saving cams.txt & pts.txt files in SBA library format..."; cout.flush();

		feat_track_history.removeFewObservedFeatures(3);
		feat_track_history.decimateCameraFrames(20);
		feat_track_history.compressIDs();

		TLandmarkLocationsVec  locs;
		TFramePosesVec         cams;
		feat_track_history.saveAsSBAFiles(locs,"pts.txt", cams, "cams.txt");


		cout << "Done!\n"; cout.flush();
#endif
	}


	return 0; // End ok.
}
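
// ------------------------------------------------------
// Possible driver for DoTrackingDemo() above (a hedged sketch, not the original
// sample's main): obtains the CCameraSensorPtr the demo expects by letting the
// user pick a video source (live camera, video file or rawlog) with MRPT's
// built-in selection dialog. Assumes MRPT 1.x with mrpt-hwdrivers, and that
// this is appended to the same source file as DoTrackingDemo().
// ------------------------------------------------------
#include <mrpt/hwdrivers/CCameraSensor.h>

int main()
{
	mrpt::hwdrivers::CCameraSensorPtr cam =
		mrpt::hwdrivers::prepareVideoSourceFromUserSelection();

	if (!cam)
		return 1;  // User cancelled, or no video source is available.

	return DoTrackingDemo(cam, false /* DO_SAVE_VIDEO */);
}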