Example #1
double feature_extraction_test_FASTER( int N, int threshold )
{
	CTicTac			tictac;

	// Generate a random image
	CImage  img;
	getTestImage(0,img);

	CFeatureExtraction		fExt;
	CFeatureList			feats;

	fExt.options.featsType	= TYP; // FASTER_N==9 ? featFASTER9 : (FASTER_N==10 ? featFASTER10 : featFASTER12 );
	fExt.options.FASTOptions.threshold = threshold; //20;
	fExt.options.patchSize = 0;

	img.grayscaleInPlace();

	tictac.Tic();
	for (int i=0;i<N;i++)
		fExt.detectFeatures( img, feats,0, MAX_N_FEATS );

	const double T = tictac.Tac()/N;
	return T;
}
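
Note: TYP and MAX_N_FEATS are compile-time constants defined elsewhere in the benchmark suite, not in this snippet. A hypothetical set of definitions, consistent with the inline comment next to featsType above (the value 500 is only an illustrative assumption):

// Hypothetical definitions assumed by feature_extraction_test_FASTER():
#define FASTER_N     9    // 9-, 10- or 12-point FASTER variant
#define TYP          (FASTER_N==9 ? featFASTER9 : (FASTER_N==10 ? featFASTER10 : featFASTER12))
#define MAX_N_FEATS  500  // hypothetical cap on the number of detected features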
Example #2
// ------------------------------------------------------
//				Benchmark: SURF
// ------------------------------------------------------
double feature_extraction_test_SURF( int N, int h )
{

	CTicTac	 tictac;

	// Generate a random image
	CImage  img;
	getTestImage(0,img);

	CFeatureExtraction	fExt;
	CFeatureList		featsSURF;

	fExt.options.featsType					= featSURF;

	tictac.Tic();
	for (int i=0;i<N;i++)
		fExt.detectFeatures( img, featsSURF );

	const double T = tictac.Tac()/N;

//	cout << "SURF: " << featsSURF.size();

	return T;
}
Example #3
// ------------------------------------------------------
//				Benchmark: FAST
// ------------------------------------------------------
double feature_extraction_test_FAST( int N, int h )
{
	CTicTac			tictac;

	// Generate a random image
	CImage  img;
	getTestImage(0,img);

	CFeatureExtraction		fExt;
	CFeatureList			featsFAST;

	fExt.options.featsType	= featFAST;
	fExt.options.FASTOptions.threshold = 20;
	fExt.options.patchSize = 0;

	img.grayscaleInPlace();

	tictac.Tic();
	for (int i=0;i<N;i++)
		fExt.detectFeatures( img, featsFAST );

	const double T = tictac.Tac()/N;
	return T;
}
Example #4
// ------------------------------------------------------
//				Benchmark: SIFT descriptor only
// ------------------------------------------------------
double feature_extraction_test_SIFT_desc( int N, int h )
{
	CTicTac	 tictac;

	// Generate a random image
	CImage  img;
	getTestImage(0,img);

	CFeatureExtraction	fExt;
	CFeatureList		featsHarris;

	fExt.options.featsType	= featHarris;

	fExt.detectFeatures( img, featsHarris );

	tictac.Tic();
	for (int i=0;i<N;i++)
		fExt.computeDescriptors( img, featsHarris, descSIFT );

	const double T = tictac.Tac()/N;

//	cout << "SIFT desc: " << featsHarris.size();
	return T;
}
Example #5
// ------------------------------------------------------
//				Benchmark: FAST + SAD
// ------------------------------------------------------
double feature_matching_test_FAST_SAD(int w, int h)
{
	CTicTac tictac;

	CImage imL, imR;
	CFeatureExtraction fExt;
	CFeatureList featsFAST_L, featsFAST_R;
	CMatchedFeatureList mFAST;

	getTestImage(0, imR);
	getTestImage(1, imL);

	// Extract features: FAST
	fExt.options.featsType = featFAST;

	// size_t				nMatches;
	TMatchingOptions opt;
	const size_t N = 20;
	opt.matching_method = TMatchingOptions::mmSAD;

	// FAST detection + SAD matching
	tictac.Tic();
	for (size_t i = 0; i < N; i++)
	{
		fExt.detectFeatures(imL, featsFAST_L, 0, NFEATS);
		fExt.detectFeatures(imR, featsFAST_R, 0, NFEATS);
		// nMatches =
		matchFeatures(featsFAST_L, featsFAST_R, mFAST, opt);
	}
	const double T = tictac.Tac() / N;

	//	cout << endl << "L: " << featsFAST_L.size() << " R: " <<
	// featsFAST_R.size() << " M: " << mFAST.size() << endl;

	return T;
}
Example #6
// ------------------------------------------------------
//				Benchmark: SIFT (hess)
// ------------------------------------------------------
double feature_extraction_test_SIFT( int N, int h )
{
	CTicTac	 tictac;

	// Generate a random image
	CImage  img;
	getTestImage(0,img);

	CFeatureExtraction	fExt;
	CFeatureList		featsSIFT;

	fExt.options.featsType	= featSIFT;
	fExt.options.SIFTOptions.implementation = CFeatureExtraction::Hess;

	tictac.Tic();
	for (int i=0;i<N;i++)
		fExt.detectFeatures( img, featsSIFT );

	const double T = tictac.Tac()/N;

//	cout << "SIFT: " << featsSIFT.size();

	return T;
}
Example #7
// ------------------------------------------------------
//				Benchmark: Spin descriptor
// ------------------------------------------------------
double feature_extraction_test_Spin_desc( int N, int h )
{
	CTicTac	 tictac;

	// Generate a random image
	CImage  img;
	getTestImage(0,img);

	CFeatureExtraction	fExt;
	CFeatureList		featsHarris;

	fExt.options.SpinImagesOptions.radius				= 13;
	fExt.options.SpinImagesOptions.hist_size_distance	= 10;
	fExt.options.SpinImagesOptions.hist_size_intensity	= 10;

	fExt.detectFeatures( img, featsHarris );

	tictac.Tic();
	for (int i=0;i<N;i++)
		fExt.computeDescriptors( img, featsHarris, descSpinImages );

	const double T = tictac.Tac()/N;
	return T;
}
Example #8
void thread_grabbing(TThreadParam& p)
{
	try
	{
		CFileGZOutputStream f_out_rawlog;
		if (arg_out_rawlog.isSet())
		{
			if (!f_out_rawlog.open(arg_out_rawlog.getValue()))
				THROW_EXCEPTION_FMT(
					"Error creating output rawlog file: %s",
					arg_out_rawlog.getValue().c_str());
		}
		auto arch = mrpt::serialization::archiveFrom(f_out_rawlog);

		mrpt::hwdrivers::CVelodyneScanner velodyne;

		if (arg_verbose.isSet()) velodyne.enableVerbose(true);

		// Set params:
		velodyne.setModelName(mrpt::typemeta::TEnumType<
							  mrpt::hwdrivers::CVelodyneScanner::model_t>::
								  name2value(arg_model.getValue()));
		if (arg_ip_filter.isSet())
			velodyne.setDeviceIP(
				arg_ip_filter.getValue());  // Default: from any IP
		if (arg_in_pcap.isSet())
			velodyne.setPCAPInputFile(arg_in_pcap.getValue());
		if (arg_out_pcap.isSet())
			velodyne.setPCAPOutputFile(arg_out_pcap.getValue());

		// If you have a calibration file, better than default values:
		if (arg_calib_file.isSet())
		{
			mrpt::obs::VelodyneCalibration calib;
			if (!calib.loadFromXMLFile(arg_calib_file.getValue()))
				throw std::runtime_error(
					"Aborting: error loading calibration file.");
			velodyne.setCalibration(calib);
		}

		// Open:
		cout << "Calling CVelodyneScanner::initialize()...";
		velodyne.initialize();
		cout << "OK\n";

		cout << "Waiting for first data packets (Press CTRL+C to abort)...\n";

		CTicTac tictac;
		int nScans = 0;
		bool hard_error = false;

		while (!hard_error && !p.quit)
		{
			// Grab new observations from the sensor:
			CObservationVelodyneScan::Ptr
				obs;  // (initially empty) Smart pointers to observations
			CObservationGPS::Ptr obs_gps;

			hard_error = !velodyne.getNextObservation(obs, obs_gps);

			// Save to log file:
			if (f_out_rawlog.fileOpenCorrectly())
			{
				if (obs) arch << *obs;
				if (obs_gps) arch << *obs_gps;
			}

			if (obs)
			{
				std::atomic_store(&p.new_obs, obs);
				nScans++;
			}
			if (obs_gps) std::atomic_store(&p.new_obs_gps, obs_gps);

			if (p.pushed_key != 0)
			{
				switch (p.pushed_key)
				{
					case 27:
						p.quit = true;
						break;
				}

				// Clear pushed key flag:
				p.pushed_key = 0;
			}

			if (nScans > 5)
			{
				p.Hz = nScans / tictac.Tac();
				nScans = 0;
				tictac.Tic();
			}
		}
	}
	catch (const std::exception& e)
	{
		cout << "Exception in Velodyne thread: " << mrpt::exception_to_str(e)
			 << endl;
		p.quit = true;
	}
}
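
The TThreadParam structure shared with the main thread is not shown in this example. A minimal sketch, inferred from the fields used above (quit, pushed_key, Hz, new_obs, new_obs_gps); the exact member types are assumptions:

struct TThreadParam
{
	std::atomic_bool quit{false};  // set by the main/UI thread to stop grabbing
	std::atomic_int pushed_key{0};  // last key pressed in the GUI (0 = none)
	std::atomic<double> Hz{0};  // measured scan rate, updated by this thread
	CObservationVelodyneScan::Ptr new_obs;  // latest scan (exchanged via std::atomic_store/load)
	CObservationGPS::Ptr new_obs_gps;  // latest GPS frame from the scanner
};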
Example #9
File: test.cpp  Project: bcsdleweev/mrpt
// ------------------------------------------------------
//				BenchmarkGridmaps
// ------------------------------------------------------
void BenchmarkGridmaps()
{
	randomGenerator.randomize(333);

	CMultiMetricMap					metricMap;
	TSetOfMetricMapInitializers		mapInit;

	// Create gridmap:
	mapInit.loadFromConfigFile( CConfigFile(iniFile), "METRIC_MAPS" );
	metricMap.setListOfMaps(&mapInit);


	// prepare the laser scan:
	CObservation2DRangeScan	scan1;
	scan1.aperture = M_PIf;
	scan1.rightToLeft = true;
	scan1.validRange.resize( SCANS_SIZE );
	scan1.scan.resize(SCANS_SIZE);
	ASSERT_( sizeof(SCAN_RANGES_1) == sizeof(float)*SCANS_SIZE );

	memcpy( &scan1.scan[0], SCAN_RANGES_1, sizeof(SCAN_RANGES_1) );
	memcpy( &scan1.validRange[0], SCAN_VALID_1, sizeof(SCAN_VALID_1) );

#if 1
		CRawlog	rawlog;
		rawlog.loadFromRawLogFile("/Trabajo/Code/MRPT/share/mrpt/datasets/2006-01ENE-21-SENA_Telecom Faculty_one_loop_only.rawlog");
		scan1 = *rawlog.getAsObservations(400)->getObservationByClass<CObservation2DRangeScan>();
#endif



	ASSERT_( metricMap.m_gridMaps.size() );
	COccupancyGridMap2DPtr gridMap = metricMap.m_gridMaps[0];
	COccupancyGridMap2D		gridMapCopy( *gridMap );

	int  i, N;
	CTicTac	 tictac;


	// test 1: getcell
	// ----------------------------------------
	if (1)
	{
		N = 10000000;

		cout << "Running test #1: getCell... "; cout.flush();

		//COccupancyGridMap2D::cellType	cell;
		float	p=0;

		tictac.Tic();
		for (i=0;i<N;i++)
		{
			p += gridMap->getCell( 0, 0 );
		}
		double T = tictac.Tac();
		cout << "-> " << 1e9*T/N << " ns/iter. p=" << p << endl;  // the "p" is to avoid optimizing out the entire loop!
	}

	// test 2: setcell
	// ----------------------------------------
	if (1)
	{
		N = 10000000;

		cout << "Running test #2: setCell... "; cout.flush();

		float	p=0.8f;

		tictac.Tic();
		for (i=0;i<N;i++)
		{
			gridMap->setCell( 0, 0, p );
		}
		double T = tictac.Tac();
		cout << "-> " << 1e9*T/N << " ns/iter." << endl;  // the "p" is to avoid optimizing out the entire loop!
	}

	// test 3: updateCell
	// ----------------------------------------
	if (1)
	{
		N = 1000000;

		cout << "Running test #3: updateCell... "; cout.flush();

		float	p=0.57f;

		tictac.Tic();
		for (i=0;i<N;i++)
		{
			gridMap->updateCell( 0, 0, p );
		}
		double T = tictac.Tac();
		cout << "-> " << 1e9*T/N << " ns/iter." << endl;  // the "p" is to avoid optimizing out the entire loop!
	}

	// test 4: updateCell_fast
	// ----------------------------------------
	if (1)
	{
		N = 10000000;

		cout << "Running test #4: updateCell_fast... "; cout.flush();

		float	p=0.57f;
		COccupancyGridMap2D::cellType  logodd_obs = COccupancyGridMap2D::p2l( p );
		//float   p_1 = 1-p;

		COccupancyGridMap2D::cellType  *theMapArray = gridMap->getRow(0);
		unsigned  theMapSize_x = gridMap->getSizeX();
		COccupancyGridMap2D::cellType   logodd_thres_occupied =  COccupancyGridMap2D::OCCGRID_CELLTYPE_MIN+logodd_obs;

		tictac.Tic();
		for (i=0;i<N;i++)
		{
			COccupancyGridMap2D::updateCell_fast_occupied( 2, 2, logodd_obs,logodd_thres_occupied, theMapArray, theMapSize_x);
		}
		double T = tictac.Tac();
		cout << "-> " << 1e9*T/N << " ns/iter." << endl;  // the "p" is to avoid optimizing out the entire loop!
	}

#if 0
	for (i=50;i<51;i++)
	{
		CPose3D  pose3D(0.21,0.34,0,-2);
		//scan1.validRange.assign(scan1.validRange.size(), false);
		//scan1.validRange[i]=true;

		gridMap->clear();
		gridMap->resizeGrid(-5,20,-15,15);
		gridMap->insertObservation( &scan1, &pose3D );
		gridMap->saveAsBitmapFile(format("./gridmap_with_widening_%04i.png",i));
	}
#endif

	// test 5: Laser insertion
	// ----------------------------------------
	if (1)
	{
		gridMap->insertionOptions.wideningBeamsWithDistance = false;
		N = 3000;
		cout << "Running test #5: Laser insert. w/o widen... "; cout.flush();
		tictac.Tic();
		for (i=0;i<N;i++)
		{
#if 1
			CPose2D  pose(
				randomGenerator.drawUniform(-1.0,1.0),
				randomGenerator.drawUniform(-1.0,1.0),
				randomGenerator.drawUniform(-M_PI,M_PI) );
			CPose3D  pose3D(pose);
#else
			CPose3D  pose3D;
#endif

			gridMap->insertObservation( &scan1, &pose3D );
		}
		double T = tictac.Tac();
		cout << "-> " << 1000*T/N << " ms/iter, scans/sec:" << N/T << endl;

		CPose3D  pose3D;
		gridMap->clear();
		gridMap->insertObservation( &scan1, &pose3D );
		gridMap->saveAsBitmapFile("./gridmap_without_widening.png");
	}

	// test 6: Laser insertion with widening
	// --------------------------------------------------
	if (1)
	{
		gridMap->insertionOptions.wideningBeamsWithDistance = true;
		N = 3000;
		cout << "Running test #6: Laser insert. widen... "; cout.flush();
		tictac.Tic();
		for (i=0;i<N;i++)
		{
#if 1
			CPose2D  pose(
				randomGenerator.drawUniform(-1.0,1.0),
				randomGenerator.drawUniform(-1.0,1.0),
				randomGenerator.drawUniform(-M_PI,M_PI) );
			CPose3D  pose3D(pose);
#else
			CPose3D  pose3D;
#endif
			gridMap->insertObservation( &scan1, &pose3D );
		}
		double T = tictac.Tac();
		cout << "-> " << 1000*T/N << " ms/iter, scans/sec:" << N/T << endl;

		CPose3D  pose3D;
		gridMap->clear();
		gridMap->insertObservation( &scan1, &pose3D );
		gridMap->saveAsBitmapFile("./gridmap_with_widening.png");
	}


	// test 7: Grid resize
	// ----------------------------------------
	if (1)
	{
		N = 400;
		cout << "Running test #7: Grid resize... "; cout.flush();
		tictac.Tic();
		for (i=0;i<N;i++)
		{
			*gridMap = gridMapCopy;
			gridMap->resizeGrid(-30,30,-40,40);
		}
		double T = tictac.Tac();
		cout << "-> " << 1000*T/N << " ms/iter" << endl;
	}

	// test 8: Likelihood computation
	// ----------------------------------------
	if (1)
	{
		N = 5000;

		*gridMap = gridMapCopy;
		CPose3D pose3D(0,0,0);
		gridMap->insertObservation( &scan1, &pose3D );

		cout << "Running test #8: Likelihood... "; cout.flush();
		double R = 0;
		tictac.Tic();
		for (i=0;i<N;i++)
		{
			CPose2D  pose(
				randomGenerator.drawUniform(-1.0,1.0),
				randomGenerator.drawUniform(-1.0,1.0),
				randomGenerator.drawUniform(-M_PI,M_PI) );
			R+=gridMap->computeObservationLikelihood(&scan1,pose);
		}
		double T = tictac.Tac();
		cout << "-> " << 1000*T/N << " ms/iter" << endl;
	}

}
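
Test #4 above works directly on raw log-odds cell values. A small illustration of the probability/log-odds conversion it relies on, assuming the standard COccupancyGridMap2D::p2l()/l2p() static helpers:

	// Illustration only: p2l() maps an occupancy probability to the integer
	// log-odds cellType expected by updateCell_fast_occupied(); l2p() maps back.
	const float p_obs = 0.57f;
	const COccupancyGridMap2D::cellType logodd_obs = COccupancyGridMap2D::p2l(p_obs);
	const float p_back = COccupancyGridMap2D::l2p(logodd_obs);  // ~0.57 again, up to quantization
	printf("p=%.2f -> logodd=%i -> p=%.2f\n", p_obs, static_cast<int>(logodd_obs), p_back);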
Example #10
/*---------------------------------------------------------------
					AlignPDF_correlation
---------------------------------------------------------------*/
CPosePDF::Ptr CGridMapAligner::AlignPDF_correlation(
	const mrpt::maps::CMetricMap* mm1, const mrpt::maps::CMetricMap* mm2,
	const CPosePDFGaussian& initialEstimationPDF, float* runningTime,
	void* info)
{
	MRPT_UNUSED_PARAM(initialEstimationPDF);
	MRPT_UNUSED_PARAM(info);

	MRPT_START

	//#define	CORRELATION_SHOW_DEBUG

	CTicTac* tictac = nullptr;

	// Asserts:
	// -----------------
	ASSERT_(mm1->GetRuntimeClass() == CLASS_ID(COccupancyGridMap2D));
	ASSERT_(mm2->GetRuntimeClass() == CLASS_ID(COccupancyGridMap2D));
	const auto* m1 = static_cast<const COccupancyGridMap2D*>(mm1);
	const auto* m2 = static_cast<const COccupancyGridMap2D*>(mm2);

	ASSERT_(m1->getResolution() == m2->getResolution());

	if (runningTime)
	{
		tictac = new CTicTac();
		tictac->Tic();
	}

	// The PDF to estimate:
	// ------------------------------------------------------
	CPosePDFGaussian::Ptr PDF = mrpt::make_aligned_shared<CPosePDFGaussian>();

	// Determine the extension to compute the correlation into:
	// ----------------------------------------------------------
	float phiResolution = DEG2RAD(0.2f);
	float phiMin = -M_PIf + 0.5f * phiResolution;
	float phiMax = M_PIf;

	// Compute the difference between maps for each (u,v) pair!
	// --------------------------------------------------------------
	float phi;
	float pivotPt_x = 0.5f * (m1->getXMax() + m1->getXMin());
	float pivotPt_y = 0.5f * (m1->getYMax() + m1->getYMin());
	COccupancyGridMap2D map2_mod;
	CImage map1_img, map2_img;
	float currentMaxCorr = -1e6f;

	m1->getAsImage(map1_img);

	map2_mod.setSize(
		m1->getXMin(), m1->getXMax(), m1->getYMin(), m1->getYMax(),
		m1->getResolution());
	size_t map2_lx = map2_mod.getSizeX();
	size_t map2_ly = map2_mod.getSizeY();
	CMatrix outCrossCorr, bestCrossCorr;
	float map2width_2 = 0.5f * (map2_mod.getXMax() - map2_mod.getXMin());

#ifdef CORRELATION_SHOW_DEBUG
	CDisplayWindow* win = new CDisplayWindow("corr");
	CDisplayWindow* win2 = new CDisplayWindow("map2_mod");
#endif

	// --------------------------------------------------------
	// Use FFT-based correlation for each orientation:
	// --------------------------------------------------------
	for (phi = phiMin; phi < phiMax; phi += phiResolution)
	{
		// Create the displaced map2 grid, for the size of map1:
		// --------------------------------------------------------
		CPose2D v2(
			pivotPt_x - cos(phi) * map2width_2,
			pivotPt_y - sin(phi) * map2width_2,
			phi);  // Rotation point: the centre of img1:
		CPoint2D v1, v3;
		v2 = CPose2D(0, 0, 0) - v2;  // Inverse

		for (unsigned int cy2 = 0; cy2 < map2_ly; cy2++)
		{
			COccupancyGridMap2D::cellType* row = map2_mod.getRow(cy2);
			for (unsigned int cx2 = 0; cx2 < map2_lx; cx2++)
			{
				v3 = v2 + CPoint2D(map2_mod.idx2x(cx2), map2_mod.idx2y(cy2));
				*row++ = m2->p2l(m2->getPos(v3.x(), v3.y()));
			}
		}

		map2_mod.getAsImage(map2_img);
		//		map2_img.saveToBMP("__debug_map2mod.bmp");

		// Compute the correlation:
		map1_img.cross_correlation_FFT(
			map2_img, outCrossCorr, -1, -1, -1, -1,
			127,  // Bias to be subtracted
			127  // Bias to be subtracted
		);

		float corrPeak = outCrossCorr.maxCoeff();
		printf("phi = %fdeg \tmax corr=%f\n", RAD2DEG(phi), corrPeak);

		// Keep the maximum:
		if (corrPeak > currentMaxCorr)
		{
			currentMaxCorr = corrPeak;
			bestCrossCorr = outCrossCorr;
			PDF->mean.phi(phi);
		}

#ifdef CORRELATION_SHOW_DEBUG
		outCrossCorr.adjustRange(0, 1);
		CImage aux(outCrossCorr, true);
		win->showImage(aux);
		win2->showImage(map2_img);
		std::this_thread::sleep_for(5ms);
#endif

	}  // end for phi

	if (runningTime)
	{
		*runningTime = tictac->Tac();
		delete tictac;
	}

	bestCrossCorr.normalize(0, 1);
	CImage aux(bestCrossCorr, true);
	aux.saveToFile("_debug_best_corr.png");

#ifdef CORRELATION_SHOW_DEBUG
	delete win;
	delete win2;
#endif

	// Transform the best corr matrix peak into coordinates:
	CMatrix::Index uMax, vMax;
	currentMaxCorr = bestCrossCorr.maxCoeff(&uMax, &vMax);

	PDF->mean.x(m1->idx2x(uMax));
	PDF->mean.y(m1->idx2y(vMax));

	return PDF;

	// Done!
	MRPT_END
}
Example #11
/*---------------------------------------------------------------
							http_get
  ---------------------------------------------------------------*/
ERRORCODE_HTTP  BASE_IMPEXP
mrpt::utils::net::http_get(
	const string	&url,
	vector_byte		&out_content,
	string			&out_errormsg,
	int				port,
	const string	&auth_user,
	const string	&auth_pass,
	int				*out_http_responsecode,
	mrpt::utils::TParameters<string>  *extra_headers,
	mrpt::utils::TParameters<string>  *out_headers,
	int  timeout_ms
	)
{
	// Reset output data:
	out_content.clear();
	if (out_http_responsecode) *out_http_responsecode=0;
	if (out_headers) out_headers->clear();

	// URL must be:
	// http://<SERVER>/<LOCAL_ADDR>

	if (0!=::strncmp(url.c_str(),"http://",7))
	{
		out_errormsg="URL must start with 'http://'";
		return net::erBadURL;
	}

	string server_addr = url.substr(7);
	string get_object = "/";

	// Remove from the first "/" on:
	size_t pos = server_addr.find("/");
	if (pos==0)
	{
		out_errormsg="Server name not found in URL";
		return net::erBadURL;
	}
	if (pos!=string::npos)
	{
		get_object = server_addr.substr(pos);
		server_addr.resize(pos);
	}


	CClientTCPSocket	sock;

	try {
		// Connect:
		sock.connect(server_addr,port, timeout_ms);
	}
	catch (std::exception &e) {
		out_errormsg = e.what();
		return net::erCouldntConnect;
	}

	try {
		// Set the user-defined headers (we may overwrite them if needed)
		TParameters<string>  headers_to_send;
		if (extra_headers)
			headers_to_send=*extra_headers;

		headers_to_send["Connection"]="close"; // Don't keep alive

		if (!headers_to_send.has("User-Agent"))
			headers_to_send["User-Agent"]="MRPT Library";

		// Implement HTTP Basic authentication:
		// See: http://en.wikipedia.org/wiki/Basic_access_authentication
		if (!auth_user.empty())
		{
			string auth_str = auth_user+string(":")+auth_pass;
			vector_byte  v(auth_str.size());
			::memcpy(&v[0], &auth_str[0], auth_str.size());

			string encoded_str;
			mrpt::system::encodeBase64(v,encoded_str);

			headers_to_send["Authorization"]=string("Basic ")+encoded_str;
		}


		// Prepare the request string
		// ---------------------------------
		string req = format(
		  "GET %s HTTP/1.1\r\n"
          "Host: %s\r\n",
		  get_object.c_str(),server_addr.c_str());

		// Other headers:
		for (TParameters<string>::const_iterator i=headers_to_send.begin();i!=headers_to_send.end();++i)
		{
			req+=i->first;
			req+=": ";
			req+=i->second;
			req+="\r\n";
		}

		// End:
		req+="\r\n";

		// Send:
		sock.sendString(req);

		// Read answer:
		vector_byte		buf;
		buf.reserve(1<<14);

		size_t	total_read = 0;
		bool	content_length_read = false;
		bool    all_headers_read = false;
		size_t  content_length = 0;
		size_t  content_offset = 0;
		int		http_code = 0;
		mrpt::utils::TParameters<string>  rx_headers;

		CTicTac  watchdog;
		watchdog.Tic();

		while (!content_length_read || total_read<(content_offset+content_length))
		{
			// Read until "Content-Length: XXX \r\n" is read or the whole message is read,
			//  or an error code is read.
			size_t	to_read_now;

			if (!content_length_read)
			{
				to_read_now = 1500;
			}
			else
			{
				to_read_now	= (content_length+content_offset) -total_read;
			}

			// make room for the data to come:
			buf.resize(total_read+to_read_now+1);

			// Read:
			size_t len = sock.readAsync(&buf[total_read],to_read_now, timeout_ms,100);
			if (!len)
			{
				//
				if (!sock.isConnected())
				{
					if (all_headers_read) // It seems we're done...
						break;
					else
					{
						out_errormsg = "Connection to server was lost";
						return net::erCouldntConnect;
					}
				}

				if ( watchdog.Tac()>1e-3*timeout_ms)
				{
					out_errormsg = "Timeout waiting answer from server";
					return net::erCouldntConnect;
				}
				mrpt::system::sleep(10);
				continue;
			}
			total_read+=len;
			watchdog.Tic();

			buf[total_read]='\0';

			// do we have a \r\n\r\n  ??
			if (!all_headers_read)
			{
				const char *ptr = ::strstr(reinterpret_cast<const char*>(&buf[0]),"\r\n\r\n");
				if (ptr)
				{
					all_headers_read = true;
					const size_t pos_dblret = ((char*)ptr)-(char*)(&buf[0]);

					// Process the headers:
					// ------------------------------
					if (!::strncmp("HTTP/",(const char*)&buf[0], 5))
					{
						http_code = ::atoi( (const char*)&buf[9] );
					}
					else
					{
						// Might it be a "SOURCETABLE " answer (NTRIP protocol)?
						if (!::strncmp("SOURCETABLE ",(const char*)&buf[0], 12))
						{
							http_code = ::atoi( (const char*)&buf[12] );
						}
						else
						{
							out_errormsg = "Server didn't send an HTTP/1.1 answer.";
							return net::erOtherHTTPError;
						}
					}

					// Check the HTTP code and the content-length:
					content_offset = pos_dblret + 4;

					// Do we have a "Content-Length:"??
					const char *ptr_len = ::strstr(reinterpret_cast<const char*>(&buf[0]),"Content-Length:");
					if (ptr_len)
					{
						content_length = ::atol( ptr_len+15 );
						content_length_read = true;
					}

					// Parse the rest of HTTP headers:
					{
						string aux_all_headers;
						deque<string> lstLines;
						aux_all_headers.resize(content_offset);
						::memcpy( &aux_all_headers[0],&buf[0],content_offset);

						mrpt::system::tokenize(aux_all_headers,"\r\n",lstLines);

						for (deque<string>::const_iterator i=lstLines.begin();i!=lstLines.end();++i)
						{
							const size_t p = i->find(":");
							if (p==string::npos) continue;

							const string key = i->substr(0,p);
							const string val = i->substr(p+2);
							rx_headers[key] = val;
						}
					}

				}
			}
		} // end while


		if (out_http_responsecode) *out_http_responsecode=http_code;
		if (out_headers) *out_headers = rx_headers;

		// Remove the headers from the content:
		buf.erase(buf.begin(),buf.begin()+content_offset);

		// Process: "Transfer-Encoding: chunked"
		if (rx_headers.has("Transfer-Encoding") && rx_headers["Transfer-Encoding"]=="chunked" )
		{
			// See: http://en.wikipedia.org/wiki/Chunked_transfer_encoding

			size_t index = 0;
			while (index<buf.size())
			{
				if (buf[index]=='\r' && buf[index+1]=='\n')
				{
					buf.erase(buf.begin()+index,buf.begin()+index+2);
					continue;
				}

				const char *pCRLF = ::strstr((const char*)&buf[index],"\r\n");
				if (!pCRLF) break;

				const size_t len_substr = ((char*)pCRLF)-(char*)(&buf[index]);

				string  sLen((const char*)&buf[index], len_substr);
				sLen=string("0x")+sLen;

				unsigned int lenChunk;
				int fields = ::sscanf(sLen.c_str(),"%x",&lenChunk);
				if (!fields) break;

				// Remove the len of this chunk header from the data:
				buf.erase(buf.begin()+index,buf.begin()+index+len_substr+2);

				index+=lenChunk;

				if (!lenChunk)
				{
					buf.resize(index);
					break;
				}
			}
		}


		// Set the content output:
		out_content.swap(buf);

		if (http_code==200)
		{
			return net::erOk;
		}
		else
		{
			out_errormsg = format("HTTP error %i",http_code);
			return net::erOtherHTTPError;
		}
	}
	catch (std::exception &e) {
		out_errormsg = e.what();
		return net::erCouldntConnect;
	}

	return net::erOk;
}
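
A minimal usage sketch for http_get() as declared above. The header path, the namespace of vector_byte, and whether some parameters have defaults vary across MRPT versions, so treat every detail here as an assumption rather than canonical usage:

#include <mrpt/utils/net_utils.h>  // assumed location of mrpt::utils::net::http_get()
#include <iostream>

int main()
{
	mrpt::vector_byte content;  // assumed alias for std::vector<uint8_t>
	std::string errmsg;
	int http_code = 0;

	const mrpt::utils::net::ERRORCODE_HTTP ret = mrpt::utils::net::http_get(
		"http://www.mrpt.org/", content, errmsg,
		80,  // port
		"", "",  // no HTTP Basic authentication
		&http_code,
		nullptr,  // no extra request headers
		nullptr,  // don't capture response headers
		6000);  // timeout [ms]

	if (ret == mrpt::utils::net::erOk)
		std::cout << "Downloaded " << content.size() << " bytes (HTTP " << http_code << ")\n";
	else
		std::cout << "http_get failed: " << errmsg << "\n";
	return ret == mrpt::utils::net::erOk ? 0 : 1;
}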
Example #12
void TestCapture_OpenCV()
{
	CImageGrabber_OpenCV* capture = nullptr;

	if (LIVE_CAM)
	{
#if 0  // test: Select the desired resolution
		mrpt::vision::TCaptureCVOptions	opts;
		opts.frame_width = 320;
		opts.frame_height = 240;
		capture = new CImageGrabber_OpenCV( 0, CAMERA_CV_AUTODETECT, opts );
#else
		capture = new CImageGrabber_OpenCV(N_CAM_TO_OPEN, CAMERA_CV_AUTODETECT);
#endif
	}
	else
	{
		capture = new CImageGrabber_OpenCV(AVI_TO_OPEN);
	}

	CTicTac tictac;

	cout << "Press any key to stop capture to 'capture.rawlog'..." << endl;

	CFileGZOutputStream fil("./capture.rawlog");

	CDisplayWindow win("Capturing...");

	int cnt = 0;

	while (!mrpt::system::os::kbhit())
	{
		if ((cnt++ % 20) == 0)
		{
			if (cnt > 0)
			{
				double t = tictac.Tac();
				double FPS = 20 / t;
				printf("\n %f FPS\n", FPS);
			}
			tictac.Tic();
		}

		// Memory will be freed by the SF destructor in each loop.
		CObservationImage::Ptr obs =
			mrpt::make_aligned_shared<CObservationImage>();
		if (!capture->getObservation(*obs))
		{
			cerr << "Error retrieving images!" << endl;
			break;
		}

		fil << obs;

		cout << ".";
		cout.flush();
		if (win.isOpen()) win.showImage(obs->image);
	}

	delete capture;
}
Example #13
File: Main.cpp  Project: DYFeng/mrpt
void vOdometry_onthefly()
{
    // LOAD INTRINSIC AND DISTORTION PARAMS OF THE CAMERAS FROM CONFIG FILE
    TCamera leftCamera, rightCamera;
    leftCamera.loadFromConfigFile( "LEFT_CAMERA", *iniFile );
    rightCamera.loadFromConfigFile( "RIGHT_CAMERA", *iniFile );

    const int resX = leftCamera.ncols;
    const int resY = leftCamera.nrows;

    vector_double rcTrans(3);
    vector_double rcRot(9);
	iniFile->read_vector( "RIGHT_CAMERA", "rcTrans", vector_double(), rcTrans, true );
	iniFile->read_vector( "RIGHT_CAMERA", "rcRot", vector_double(), rcRot, true );

    double m1[3][3];
    for(unsigned int i = 0; i < 3; ++i)
        for(unsigned int j = 0; j < 3; ++j)
            m1[i][j] = rcRot[i*3+j];

    // PRE: COMPUTE INITIAL STEREO RECTIFICATION MAPS
    double ipl[3][3], ipr[3][3], dpl[5], dpr[5];
    for( unsigned int i = 0; i < 3; ++i )
        for( unsigned int j = 0; j < 3; ++j )
        {
            ipl[i][j] = leftCamera.intrinsicParams(i,j);
            ipr[i][j] = rightCamera.intrinsicParams(i,j);
        }

    for( unsigned int i = 0; i < 5; ++i )
    {
        dpl[i] = leftCamera.dist[i];
        dpr[i] = rightCamera.dist[i];
    }

    // WITH OLD OPENCV VERSION
    // *****************************************************************
    /** /
    CvMat R = cvMat( 3, 3, CV_64F, &m1 );
    CvMat T = cvMat( 3, 1, CV_64F, &rcTrans );

    CvMat K1 = cvMat(3,3,CV_64F,ipl);
    CvMat K2 = cvMat(3,3,CV_64F,ipr);
    CvMat D1 = cvMat(1,5,CV_64F,dpl);
    CvMat D2 = cvMat(1,5,CV_64F,dpr);

    double _R1[3][3], _R2[3][3], _P1[3][4], _P2[3][4];
    CvMat R1 = cvMat(3,3,CV_64F,_R1);
    CvMat R2 = cvMat(3,3,CV_64F,_R2);
    CvMat P1 = cvMat(3,4,CV_64F,_P1);
    CvMat P2 = cvMat(3,4,CV_64F,_P2);

    CvSize imageSize = {resX,resY};
    cvStereoRectify( &K1, &K2, &D1, &D2, imageSize,
        &R, &T,
        &R1, &R2, &P1, &P2, 0, 0 );

    CvMat* mx1 = cvCreateMat( imageSize.height,
            imageSize.width, CV_32F );
    CvMat* my1 = cvCreateMat( imageSize.height,
            imageSize.width, CV_32F );
    CvMat* mx2 = cvCreateMat( imageSize.height,
            imageSize.width, CV_32F );
    CvMat* my2 = cvCreateMat( imageSize.height,
            imageSize.width, CV_32F );
    CvMat* img1r = cvCreateMat( imageSize.height,
            imageSize.width, CV_8U );
    CvMat* img2r = cvCreateMat( imageSize.height,
            imageSize.width, CV_8U );

    cvInitUndistortRectifyMap(&K1,&D1,&R1,&P1,mx1,my1);
    cvInitUndistortRectifyMap(&K2,&D2,&R2,&P2,mx2,my2);

    CImage im1, im2;
    im1.loadFromFile("imgs/leftImage1024.jpg");
    im2.loadFromFile("imgs/rightImage1024.jpg");

    cvRemap( static_cast<IplImage *>( im1.getAsIplImage() ), img1r, mx1, my1 );
    cvRemap( static_cast<IplImage *>( im2.getAsIplImage() ), img2r, mx2, my2 );

    cvSaveImage( "imgs/leftImage1024_rect.jpg", img1r );
    cvSaveImage( "imgs/rightImage1024_rect.jpg", img2r );

    IplImage stub, *dst_img, stub2, *dst_img2;
    dst_img = cvGetImage(img1r, &stub);
    dst_img2 = cvGetImage(img2r, &stub2);

    CImage im1out( dst_img );
    CImage im2out( dst_img2 );
    /**/
    // *****************************************************************

    // WITH NEW OPENCV VERSION
    // *****************************************************************
    /**/
    cv::Mat R( 3, 3, CV_64F, &m1 );
    cv::Mat T( 3, 1, CV_64F, &rcTrans );

    cv::Mat K1(3,3,CV_64F,ipl);
    cv::Mat K2(3,3,CV_64F,ipr);
    cv::Mat D1(1,5,CV_64F,dpl);
    cv::Mat D2(1,5,CV_64F,dpr);

    double _R1[3][3], _R2[3][3], _P1[3][4], _P2[3][4], _Q[4][4];
    cv::Mat R1(3,3,CV_64F,_R1);
    cv::Mat R2(3,3,CV_64F,_R2);
    cv::Mat P1(3,4,CV_64F,_P1);
    cv::Mat P2(3,4,CV_64F,_P2);
    cv::Mat Q(4,4,CV_64F,_Q);

    cv::Size nSize(resX,resY);
    float alpha = 0.0;                  // alpha value: 0.0 = zoom and crop the image so there are no black areas
    cv::stereoRectify(
        K1, D1,
        K2, D2,
        nSize,
        R, T,
        R1, R2, P1, P2, Q, alpha,
        cv::Size(), 0, 0, 0 );

    cv::Mat mapx1, mapy1, mapx2, mapy2;
    mapx1.create( resY, resX, CV_32FC1 );
    mapy1.create( resY, resX, CV_32FC1 );
    mapx2.create( resY, resX, CV_32FC1 );
    mapy2.create( resY, resX, CV_32FC1 );

    cv::Size sz1, sz2;
    cv::initUndistortRectifyMap( K1, D1, R1, P1, cv::Size(resX,resY), CV_32FC1, mapx1, mapy1 );
    cv::initUndistortRectifyMap( K2, D2, R2, P2, cv::Size(resX,resY), CV_32FC1, mapx2, mapy2 );

    // Capture image from camera
    // Perform visual odometry!!
    CVisualOdometryStereo vOdometer;

    vOdometer.loadOptions( *iniFile );
    vOdometer.stereoParams.dumpToConsole();

    mrpt::system::pause();

    CCameraSensorPtr cam( new CCameraSensor );

    cam->loadConfig( *iniFile, "GRABBER_CONFIG");
    cam->initialize();	// This will raise an exception if necessary

//    // Extraction
//    CFeatureExtraction fExt;
//    fExt.options.loadFromConfigFile( *iniFile, "EXTRACTION" );
//    fExt.options.dumpToConsole();
//
//    // Matching
//    TMatchingOptions matchOptions;
//    matchOptions.loadFromConfigFile( *iniFile, "MATCH" );
//    matchOptions.dumpToConsole();
//
//    // Projection
//    TStereoSystemParams stereoParams;
//    stereoParams.loadFromConfigFile( *iniFile, "PROJECT" );
//    stereoParams.dumpToConsole();

//    CDisplayWindow w1, w2, w3, w4;  // The grabber and rectified images
//    w1.setPos(0,0);
//    w2.setPos(340,0);
//    w3.setPos(0,300);
//    w4.setPos(340,300);

    CDisplayWindow win("Matches");
    win.setPos(0,550);

    CDisplayWindow3D win3D("Map");
    COpenGLScenePtr &scene = win3D.get3DSceneAndLock();
    {	// Ground plane:
		CGridPlaneXYPtr obj = CGridPlaneXY::Create(-200,200,-200,200,0, 5);
		obj->setColor(0.7,0.7,0.7);
		scene->insert(obj);
		scene->insert( stock_objects::CornerXYZ() );
	}
    win3D.setCameraZoom(10);
    win3D.unlockAccess3DScene();
	win3D.repaint();

    CTicTac tictac;
    unsigned int counter = 0;
    // ------------------------------------------------------------------------
    // MAIN LOOP --------------------------------------------------------------
    // ------------------------------------------------------------------------
    while( !mrpt::system::os::kbhit() )
    {
        if( counter == 0 )
            tictac.Tic();

        CObservationPtr obs;
		try	{ obs= cam->getNextFrame();	}
		catch (CExceptionEOF &)	{ /* End of a rawlog file.*/ break; }

		if (!obs)
		{
			cerr << "*Warning* getNextFrame() returned NULL!\n";
			mrpt::system::sleep(50);
			continue;
		}

		CObservationStereoImagesPtr o = CObservationStereoImagesPtr(obs);
		o->leftCamera = leftCamera;
		o->rightCamera = rightCamera;

//		w1.showImage( o->imageLeft );
//		w2.showImage( o->imageRight );

        cv::Mat outMat1( resY, resX, CV_64F );
        cv::Mat outMat2( resY, resX, CV_64F );

        cv::remap( cv::Mat( static_cast<IplImage *>( o->imageLeft.getAsIplImage() ) ), outMat1, mapx1, mapy1, cv::INTER_CUBIC );
        cv::remap( cv::Mat( static_cast<IplImage *>( o->imageRight.getAsIplImage() ) ), outMat2, mapx2, mapy2, cv::INTER_CUBIC );

        IplImage iplim1 = IplImage(outMat1);
        IplImage iplim2 = IplImage(outMat2);

    //    cvSaveImage( "imgs/leftoutimg1024_rect.jpg", &iplim1 );
    //    cvSaveImage( "imgs/rightoutimg1024_rect.jpg", &iplim2 );

        // Insert them into the observation
        o->imageLeft.loadFromIplImage( &iplim1 );
        o->imageRight.loadFromIplImage( &iplim2 );

//        w3.showImage( o->imageLeft );
//        w4.showImage( o->imageRight );

        if( counter%10 == 0 )
        {
            cout << "FPS: " << 10.0/tictac.Tac() << endl;
            tictac.Tic();
        }
        counter++;

        poses::CPose3DQuatPDFGaussian outEst;
        vOdometer.process_light( o, outEst );
        const TOdometryInfo info = vOdometer.getInfo();

        win.showImagesAndMatchedPoints( info.m_obs->imageLeft, info.m_obs->imageRight, info.m_mfList );
        //cout << "OUT: " << outEst.mean << endl;
    /**/
    // *****************************************************************

//
//    cout << "Extracting features with type: " << fExt.options.featsType;
//    cout << " and patch size: " << fExt.options.patchSize << endl;
//
//    CFeatureList leftList, rightList;
//    fExt.detectFeatures( o->imageLeft, leftList );
//    fExt.detectFeatures( o->imageRight, rightList );
//
//    leftList.saveToTextFile( "imgs/leftlist.txt" );
//    rightList.saveToTextFile( "imgs/rightlist.txt" );
//
//    CMatchedFeatureList outMatchedList;
//    unsigned int nMatches = mrpt::vision::matchFeatures( leftList, rightList, outMatchedList, matchOptions );
//    cout << "Left features: " << leftList.size() << endl;
//    cout << "Right features: " << rightList.size() << endl;
//    cout << "Matches: " << nMatches << endl;
//
//    // Show matches
//    win.showImagesAndMatchedPoints( o->imageLeft, o->imageRight, outMatchedList );
//
//    mrpt::slam::CLandmarksMap landmarks;
//    mrpt::vision::projectMatchedFeatures( outMatchedList, stereoParams, landmarks);
//    landmarks.changeCoordinatesReference( CPose3D( 0, 0, 0, DEG2RAD(-90), 0, DEG2RAD(-90) ) );
//
//    COpenGLScenePtr &scene = win3D.get3DSceneAndLock();
//    scene->clear();
//    mrpt::opengl::CSetOfObjectsPtr obj = CSetOfObjects::Create();
//    landmarks.getAs3DObject( obj );
////    if( counter == 0 )
////    {
////        obj->setName( "elipses" );
//        scene->insert( obj );
////    }
////    else
////    {
////        mrpt::opengl::CSetOfObjectsPtr my_obj = static_cast<CSetOfObjectsPtr>(scene->getByName( "elipses" ));
////        my_obj->clear();
////        my_obj->insert( obj );
////        scene->insert(obj);
////    }
////
//    win3D.setCameraZoom(10);
//    win3D.unlockAccess3DScene();
//	win3D.repaint();
//
//    mrpt::system::pause();
// UP TO HERE
////    CObservationStereoImages obs;
////    obs.imageLeft.copyFastFrom( im1out );
////    obs.imageRight.copyFastFrom( im2out );
//
//    CVisualOdometryStereo vOdometer;
//    vOdometer.loadOptions( *iniFile );

    // if IT_1
    // COMPUTE FAST FEATURES (ADAPTIVE FAST THRESHOLD)
    // MATCH WITH SAD
    // PROJECT TO 3D
    // STORE

    // if IT_i
    // TRACK FEATURES
    // CHECK MATCHES: EPIPOLAR, OUT_OF_BOUNDS
    // PROJECT TO 3D
    // COMPUTE HORN + COVARIANCE (new)

    // ENOUGH FEATURES? -> FIND MORE FEATURES
    // back to [1]
    }
    return;
} // end vOdometry_onthefly()
Example #14
void CMyGLCanvasBase::Render()
{
	CTicTac tictac;
	double	At = 0.1;

    wxPaintDC dc(this);

	if (!m_gl_context) { /*cerr << "[CMyGLCanvasBase::Render] No GL Context!" << endl;*/ return; }
	else SetCurrent(*m_gl_context);

    // Init OpenGL once, but after SetCurrent
    if (!m_init)
    {
        InitGL();
        m_init = true;
    }

    try
    {
    	// Call PreRender user code:
    	OnPreRender();


        glPushAttrib(GL_ALL_ATTRIB_BITS);

        // Set static configs:
        glEnable(GL_DEPTH_TEST);
        glEnable(GL_ALPHA_TEST);
        glEnable(GL_TEXTURE_2D);


        // PART 1a: Set the viewport
        // --------------------------------------
		int width, height;
		GetClientSize(&width, &height);
        glViewport(
            0,
            0,
            (GLsizei)width,
            (GLsizei)height);

        // Set the background color:
        glClearColor(clearColorR,clearColorG,clearColorB,1.0);

        if (m_openGLScene)
        {
			// Set the camera params in the scene:
			if (!useCameraFromScene)
			{
				COpenGLViewportPtr view= m_openGLScene->getViewport("main");
				if (!view)
				{
					THROW_EXCEPTION("Fatal error: there is no 'main' viewport in the 3D scene!");
				}

				view->getCamera().setPointingAt( cameraPointingX, cameraPointingY, cameraPointingZ );
				view->getCamera().setZoomDistance(cameraZoomDistance);
				view->getCamera().setAzimuthDegrees( cameraAzimuthDeg );
				view->getCamera().setElevationDegrees(cameraElevationDeg);
				view->getCamera().setProjectiveModel( cameraIsProjective );
			}

			// PART 2: Set the MODELVIEW matrix
			// --------------------------------------
			glMatrixMode(GL_MODELVIEW);
			glLoadIdentity();

			tictac.Tic();

			// PART 3: Draw primitives:
			// --------------------------------------
			m_openGLScene->render();

        } // end if "m_openGLScene!=NULL"

		OnPostRender();

        // Flush & swap buffers to display new image:
        glFlush();
		SwapBuffers();

		At = tictac.Tac();

        glPopAttrib();
    }
    catch (std::exception &e)
    {
        glPopAttrib();
		const std::string err_msg = std::string("[CMyGLCanvasBase::Render] Exception!: ") +std::string(e.what());
        std::cerr << err_msg;
        OnRenderError( _U(err_msg.c_str()) );
    }
    catch (...)
    {
        glPopAttrib();
        std::cerr << "Runtime error!" << std::endl;
    }

	OnPostRenderSwapBuffers( At, dc );

}
Example #15
File: test.cpp  Project: 3660628/mrpt
// ------------------------------------------------------
//				TestExtractFeatures
// ------------------------------------------------------
void TestExtractFeatures()
{
	CDisplayWindow		wind1,wind2,wind3,wind4,wind5;
	CFeatureExtraction	fExt;
	CFeatureList		featsHarris, featsKLT, featsSIFT_Hess, featsSIFT_Lowe, featsSIFT_Vedaldi, featsSURF, featsFAST;
	CImage				img;

	if (!img.loadFromFile(the_img_for_extract_feats ))
	{
		cerr << "Cannot load " << the_img_for_extract_feats  << endl;
		return;
	}
	cout << "Loaded test image: " << endl << the_img_for_extract_feats << endl;
	cout << "--------------------------------------------------------------------------" << endl << endl;

	CTicTac	tictac;

	fExt.options.patchSize = 0;

	cout << "Detect Harris features... [f_harris.txt]" << endl;
	tictac.Tic();
	fExt.options.featsType = featHarris;
	fExt.detectFeatures( img, featsHarris );
	cout << "Detected " << featsHarris.size() << " features in ";
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsHarris.saveToTextFile("f_harris.txt");
	wind1.setWindowTitle("Harris detected features");
	wind1.showImageAndPoints(img, featsHarris);

	cout << "Detect FAST features... [f_fast.txt]" << endl;
	tictac.Tic();
	fExt.options.featsType = featFAST;
	fExt.options.FASTOptions.threshold = 15; //150;
	fExt.options.FASTOptions.min_distance = 4;
	fExt.options.FASTOptions.use_KLT_response = true;
	fExt.detectFeatures( img, featsFAST, 0,  500 /* max num feats */  );
	cout << "Detected " << featsFAST.size() << " features in ";
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsFAST.saveToTextFile("f_fast.txt");
	wind5.setWindowTitle("FAST detected features");
	wind5.showImageAndPoints( img, featsFAST );

	cout << "Computing SIFT descriptors only ... [f_harris+sift.txt]" << endl;
	tictac.Tic();
	fExt.options.SIFTOptions.implementation = CFeatureExtraction::Hess;
	fExt.computeDescriptors( img, featsHarris, descSIFT );
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsHarris.saveToTextFile("f_harris+sift.txt");

	cout << "Extracting KLT features... [f_klt.txt]" << endl;
	tictac.Tic();
	fExt.options.featsType = featKLT;
	fExt.options.KLTOptions.threshold	= 0.05f;
	fExt.options.KLTOptions.radius		= 5;
	fExt.detectFeatures( img, featsKLT, 0, 10 );
	cout << "Detected " << featsKLT.size() << " features in ";
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsKLT.saveToTextFile("f_klt.txt");
	wind2.setWindowTitle("KLT detected features");
	wind2.showImageAndPoints( img, featsKLT );

	cout << "Extracting SIFT features... [f_sift_hess.txt]" << endl;
	tictac.Tic();
	fExt.options.featsType = featSIFT;
	fExt.options.SIFTOptions.implementation = CFeatureExtraction::Hess;
	fExt.detectFeatures( img, featsSIFT_Hess );
	cout << "Detected " << featsSIFT_Hess.size() << " features in ";
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsSIFT_Hess.saveToTextFile("f_sift_hess.txt");
	wind3.setWindowTitle("SIFT Hess detected features");
	wind3.showImageAndPoints( img, featsSIFT_Hess );

	cout << "Extracting SURF features... [f_surf.txt]" << endl;
	tictac.Tic();
	fExt.options.featsType = featSURF;
	fExt.detectFeatures( img, featsSURF );
	cout << "Detected " << featsSURF.size() << " features in ";
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsSURF.saveToTextFile("f_surf.txt");
	wind4.setWindowTitle("SURF detected features");
	wind4.showImageAndPoints( img, featsSURF );

	cout << "Computing spin images descriptors only ... [f_harris+spinimgs.txt]" << endl;
	tictac.Tic();
	fExt.options.SpinImagesOptions.radius = 13;
	fExt.options.SpinImagesOptions.hist_size_distance  = 10;
	fExt.options.SpinImagesOptions.hist_size_intensity = 10;
	fExt.computeDescriptors( img, featsHarris, descSpinImages );
	cout << format("  %.03fms",tictac.Tac()*1000) << endl << endl;
	featsHarris.saveToTextFile("f_harris+spinimgs.txt");
	
	mrpt::system::pause();

	return;
}
Example #16
int main(int argc, char** argv)
{
	try
	{
		if (argc != 1)
		{
			cerr << "Usage: " << argv[0] << " \n";
			cerr << "Example: " << argv[0] << " \n";
			return 1;
		}

		////  #if MRPT_HAS_CXX11
		////		typedef std::unique_ptr<COpenNI2_RGBD360> COpenNI2Ptr;  // This assures automatic destruction
		////  #else
		////		typedef std::auto_ptr<COpenNI2_RGBD360> COpenNI2Ptr;  // This assures automatic destruction
		////  #endif

		// Only one of these will be actually used:
		COpenNI2_RGBD360 rgbd360;  // = new COpenNI2_RGBD360();

		// Set params:
		rgbd360.enableGrabRGB(true);
		rgbd360.enableGrabDepth(true);
		//    rgbd360.enableGrab3DPoints(p.generate_3D_pointcloud_in_this_thread);

		//    rgbd360.loadConfig_sensorSpecific(const
		//    mrpt::utils::CConfigFileBase &configSource,	const std::string
		//    &iniSection );

		// Open:
		cout << "Calling COpenNI2_RGBD360::initialize()...";
		//    rgbd360.m_num_sensors = num_sensors;
		rgbd360.initialize();
		std::this_thread::sleep_for(2000ms);  // Sleep 2s
		cout << "OK " << rgbd360.numDevices << " available devices." << endl;

		const unsigned num_sensors = NUM_SENSORS;
		vector<std::thread> v_stitch_hd(num_sensors);
		vector<bool> v_stitch_im(num_sensors);

		//    cout << "Create windows\n";
		vector<mrpt::gui::CDisplayWindow::Ptr> win(num_sensors);
		//    vector<CObservation3DRangeScan::Ptr> newObs(num_sensors);
		CObservationRGBD360 newObs;
		for (unsigned i = 0; i < num_sensors; i++)
		{
			win[i] = mrpt::make_aligned_shared<mrpt::gui::CDisplayWindow>();
			//      newObs[i] =
			//      mrpt::make_aligned_shared<CObservation3DRangeScan>();
		}

		cout << "Get observation\n";
		bool bObs = false, bError = true;
		//    rgbd360.getNextObservation(newObs, bObs, bError);
		//    cout << "Get observation OK\n";

		CTicTac tictac;
		tictac.Tic();
		unsigned int nFrames = 0;

		while (!system::os::kbhit())
		{
			//    cout << "Get a new frame\n";
			rgbd360.getNextObservation(newObs, bObs, bError);

			double fps = ++nFrames / tictac.Tac();
			//      newObs->intensityImage.textOut(5,5,format("%.02f
			//      fps",fps),TColor(0x80,0x80,0x80) );
			cout << "FPS: " << fps << endl;

			if (nFrames > 100)
			{
				tictac.Tic();
				nFrames = 0;
			}

			for (unsigned i = 0; i < num_sensors; i++)
				win[i]->showImage(newObs.intensityImages[i]);
			//      win.showImage(newObs.intensityImage);
			std::this_thread::sleep_for(10ms);
		}

		//    // Create window and prepare OpenGL object in the scene:
		//    // --------------------------------------------------------
		//    mrpt::gui::CDisplayWindow3D  win3D("OpenNI2 3D view",800,600);
		//
		//    win3D.setCameraAzimuthDeg(140);
		//    win3D.setCameraElevationDeg(20);
		//    win3D.setCameraZoom(8.0);
		//    win3D.setFOV(90);
		//    win3D.setCameraPointingToPoint(2.5,0,0);
		//
		//    mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		//    mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
		//    gl_points->setPointSize(2.5);
		//
		//    opengl::COpenGLViewport::Ptr viewInt; // Extra viewports for the
		//    RGB images.
		//    {
		//      mrpt::opengl::COpenGLScene::Ptr &scene =
		//      win3D.get3DSceneAndLock();
		//
		//      // Create the Opengl object for the point cloud:
		//      scene->insert( gl_points );
		//      scene->insert(
		//      mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>() );
		//      scene->insert( mrpt::opengl::stock_objects::CornerXYZ() );
		//
		//      const double aspect_ratio =  480.0 / 640.0;
		//      const int VW_WIDTH = 400;	// Size of the viewport into the
		//      window, in pixel units.
		//      const int VW_HEIGHT = aspect_ratio*VW_WIDTH;
		//
		//      // Create an extra opengl viewport for the RGB image:
		//      viewInt = scene->createViewport("view2d_int");
		//      viewInt->setViewportPosition(5, 30, VW_WIDTH,VW_HEIGHT );
		//      win3D.addTextMessage(10, 30+VW_HEIGHT+10,"Intensity
		//      data",TColorf(1,1,1), 2, MRPT_GLUT_BITMAP_HELVETICA_12 );
		//
		//      win3D.addTextMessage(5,5,
		//        format("'o'/'i'-zoom out/in, ESC: quit"),
		//          TColorf(0,0,1), 110, MRPT_GLUT_BITMAP_HELVETICA_18 );
		//
		//      win3D.unlockAccess3DScene();
		//      win3D.repaint();
		//    }
		//
		//
		//    //							Grab frames continuously and show
		//    //========================================================================================
		//
		//    bool bObs = false, bError = true;
		//    mrpt::system::TTimeStamp  last_obs_tim = INVALID_TIMESTAMP;
		//
		//    while (!win3D.keyHit())	//Push any key to exit // win3D.isOpen()
		//    {
		////    cout << "Get new observation\n";
		//      CObservation3DRangeScan::Ptr newObs =
		//      mrpt::make_aligned_shared<CObservation3DRangeScan>();
		//      rgbd360.getNextObservation(*newObs, bObs, bError);
		//
		//      if (bObs && !bError && newObs &&
		//      newObs->timestamp!=INVALID_TIMESTAMP &&
		//      newObs->timestamp!=last_obs_tim )
		//      {
		//        // It IS a new observation:
		//        last_obs_tim = newObs->timestamp;
		//
		//        // Update visualization
		//        ---------------------------------------
		//
		//        win3D.get3DSceneAndLock();
		//
		//        // Estimated grabbing rate:
		//        win3D.addTextMessage(-350,-13, format("Timestamp: %s",
		//        mrpt::system::dateTimeLocalToString(last_obs_tim).c_str()),
		//        TColorf(0.6,0.6,0.6),"mono",10,mrpt::opengl::FILL, 100);
		//
		//        // Show intensity image:
		//        if (newObs->hasIntensityImage )
		//        {
		//          viewInt->setImageView(newObs->intensityImage); // This is
		//          not "_fast" since the intensity image may be needed later
		//          on.
		//        }
		//        win3D.unlockAccess3DScene();
		//
		//        // -------------------------------------------------------
		//        //           Create 3D points from RGB+D data
		//        //
		//        // There are several methods to do this.
		//        //  Switch the #if's to select among the options:
		//        // See also:
		//        http://www.mrpt.org/Generating_3D_point_clouds_from_RGB_D_observations
		//        // -------------------------------------------------------
		//        if (newObs->hasRangeImage)
		//        {
		//  #if 0
		//          static pcl::PointCloud<pcl::PointXYZRGB> cloud;
		//          newObs->project3DPointsFromDepthImageInto(cloud, false /*
		//          without obs.sensorPose */);
		//
		//          win3D.get3DSceneAndLock();
		//            gl_points->loadFromPointsMap(&cloud);
		//          win3D.unlockAccess3DScene();
		//  #endif
		//
		//  // Pathway: RGB+D --> XYZ+RGB opengl
		//  #if 1
		//          win3D.get3DSceneAndLock();
		//            newObs->project3DPointsFromDepthImageInto(*gl_points,
		//            false /* without obs.sensorPose */);
		//          win3D.unlockAccess3DScene();
		//  #endif
		//        }
		//
		//        win3D.repaint();
		//      } // end update visualization:
		//    }
		//
		//    rgbd360.close();
		//
		//    cout << "\nStopping...\n";
		//
		//    return 0;
	}
	catch (std::exception& e)
	{
		std::cout << "MRPT exception caught: " << e.what() << std::endl;
		return -1;
	}
	catch (...)
	{
		printf("Untyped exception!!");
		return -1;
	}
}
Example #17
File: Main.cpp  Project: DYFeng/mrpt
// ------------------------------------------------------
//					Visual Odometry
// ------------------------------------------------------
void vOdometry_lightweight()
{
	// My Local Variables
	CVisualOdometryStereo				vOdometer;
	unsigned int						step = 0;
	CTicTac								tictac;

	std::vector<CPose3DQuat>			path1;
	std::vector<CPose3DQuatPDFGaussian>	path2;

	size_t								rawlogEntry = 0;
	CFileGZInputStream					rawlogFile( RAWLOG_FILE );

	// Initial pose of the path
	path1.push_back( CPose3DQuat() );
	path2.push_back( CPose3DQuatPDFGaussian() );

	// ----------------------------------------------------------
	//						vOdometry
	// ----------------------------------------------------------
	CActionCollectionPtr	action;
	CSensoryFramePtr		observations;
	CObservationPtr			observation;

	// Load options (stereo + matching + odometry)
	vOdometer.loadOptions( INI_FILENAME );

	// Delete previous files and prepare output dir
	deleteFiles( format("%s/*.txt", OUT_DIR) );

	FILE *f_cov = os::fopen( format( "%s/cov.txt", OUT_DIR ), "wt");
	ASSERT_( f_cov != NULL );

	// Iteration counter
	int	counter = 0;

	FILE *f_log = os::fopen( format( "%s/q.txt", OUT_DIR ), "wt");
	FILE *f_log2 = os::fopen( format( "%s/path.txt", OUT_DIR ), "wt");

	unsigned int imDecimation = 5;

	// Main Loop
	tictac.Tic();
	for (;;)
	{
		if (os::kbhit())
		{
			char c = os::getch();
			if (c==27)
				break;
		}

		// Load action/observation pair from the rawlog:
		// --------------------------------------------------
		if (! CRawlog::getActionObservationPairOrObservation( rawlogFile, action, observations, observation, rawlogEntry) )
			break; // file EOF


		if ( rawlogEntry >= RAWLOG_OFFSET && 0 == ( step % DECIMATION ) )
		{
			// Execute:
			// ----------------------------------------

			// STEREO IMAGES OBSERVATION
			CObservationStereoImagesPtr sImgs;
			if( observation )
				sImgs = CObservationStereoImagesPtr( observation );
			else
				sImgs = observations->getObservationByClass<CObservationStereoImages>();

			poses::CPose3DQuatPDFGaussian outEst;
			if( sImgs )
			{
				if( counter == 0 )
				{
					// Set initial parameters
					vOdometer.stereoParams.baseline = sImgs->rightCameraPose.x();
					vOdometer.stereoParams.K = sImgs->leftCamera.intrinsicParams;
				}

				cout << "Rawlog Entry: " << rawlogEntry << " Iteration: " << counter++ << endl;

				if( step % imDecimation )
				{
					vOdometer.process_light( sImgs, outEst );
					TOdometryInfo info = vOdometer.getInfo();

					// Save to file both the quaternion and covariance matrix
					os::fprintf( f_log,"%f %f %f %f %f %f %f\n",
						outEst.mean[0], outEst.mean[1], outEst.mean[2], outEst.mean[3], outEst.mean[4], outEst.mean[5], outEst.mean[6] );
					info.m_Prev_cloud.landmarks.saveToTextFile( format( "%s/clouds%04d.txt", OUT_DIR, step ) );

					path1.push_back( path1.back() + outEst.mean );
					os::fprintf( f_log2,"%f %f %f %f %f %f %f\n",
						path1.back()[0], path1.back()[1], path1.back()[2], path1.back()[3], path1.back()[4], path1.back()[5], path1.back()[6] );

					path2.push_back( outEst );
				}
				else
					cout << "Skipped step" << endl;

			} // end if sImgs != NULL

		} // end if 'rawlogEntry >= rawlog_offset'

		step++;

		// Free memory:
		action.clear_unique();
		observations.clear_unique();

	}; // end  while !end
	cout << "*************** Tiempo: " << 1000.0f*tictac.Tac() << "************************" << endl;

	os::fclose( f_cov );
	os::fclose( f_log );
	os::fclose( f_log2 );

	// SAVE THE RESULTS
	/**/
	FILE *fPath1 = os::fopen( format("./%s/EstimatedPath.txt", OUT_DIR).c_str(), "wt");
	if( fPath1 != NULL )
	{
		std::vector<CPose3DQuat>::iterator	itPath;
		for(itPath = path1.begin(); itPath != path1.end(); ++itPath )
			os::fprintf( fPath1,"%f %f %f %f %f %f %f\n",
			itPath->x(), itPath->y(), itPath->z(),
			itPath->quat().r(), itPath->quat().x(), itPath->quat().y(), itPath->quat().z() );

		os::fclose( fPath1 );
	}
	else
		std::cout << "WARNING: The estimated path could not be saved" << std::endl;

	FILE *fPath2 = os::fopen( format("./%s/EstimatedPathPDF.txt", OUT_DIR).c_str(), "wt");
	if( fPath2 != NULL )
	{
		std::vector<CPose3DQuatPDFGaussian>::iterator	itPath;
		for(itPath = path2.begin(); itPath != path2.end(); ++itPath )
		{
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n",
				itPath->mean.x(), itPath->mean.y(), itPath->mean.z(),
				itPath->mean.quat().r(), itPath->mean.quat().x(), itPath->mean.quat().y(), itPath->mean.quat().z() );

			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(0,0), itPath->cov.get_unsafe(0,1), itPath->cov.get_unsafe(0,2), itPath->cov.get_unsafe(0,3), itPath->cov.get_unsafe(0,4), itPath->cov.get_unsafe(0,5), itPath->cov.get_unsafe(0,6));
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(1,0), itPath->cov.get_unsafe(1,1), itPath->cov.get_unsafe(1,2), itPath->cov.get_unsafe(1,3), itPath->cov.get_unsafe(1,4), itPath->cov.get_unsafe(1,5), itPath->cov.get_unsafe(1,6));
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(2,0), itPath->cov.get_unsafe(2,1), itPath->cov.get_unsafe(2,2), itPath->cov.get_unsafe(2,3), itPath->cov.get_unsafe(2,4), itPath->cov.get_unsafe(2,5), itPath->cov.get_unsafe(2,6));
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(3,0), itPath->cov.get_unsafe(3,1), itPath->cov.get_unsafe(3,2), itPath->cov.get_unsafe(3,3), itPath->cov.get_unsafe(3,4), itPath->cov.get_unsafe(3,5), itPath->cov.get_unsafe(3,6));
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(4,0), itPath->cov.get_unsafe(4,1), itPath->cov.get_unsafe(4,2), itPath->cov.get_unsafe(4,3), itPath->cov.get_unsafe(4,4), itPath->cov.get_unsafe(4,5), itPath->cov.get_unsafe(4,6));
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(5,0), itPath->cov.get_unsafe(5,1), itPath->cov.get_unsafe(5,2), itPath->cov.get_unsafe(5,3), itPath->cov.get_unsafe(5,4), itPath->cov.get_unsafe(5,5), itPath->cov.get_unsafe(5,6));
			os::fprintf(fPath2,"%.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", itPath->cov.get_unsafe(6,0), itPath->cov.get_unsafe(6,1), itPath->cov.get_unsafe(6,2), itPath->cov.get_unsafe(6,3), itPath->cov.get_unsafe(6,4), itPath->cov.get_unsafe(6,5), itPath->cov.get_unsafe(6,6));
		}
		os::fclose( fPath2 );
	}
	else
		std::cout << "WARNING: The estimated pdf path could not be saved" << std::endl;

	std::cout << "Saved results!" << std::endl;
	/**/

	mrpt::system::pause();

}
Example #18
// ------------------------------------------------------
//				TestDisplay3D
// ------------------------------------------------------
void TestDisplay3D()
{
	CDisplayWindow3D	win("Example of 3D Scene Visualization - MRPT",640,480);

	COpenGLScenePtr &theScene = win.get3DSceneAndLock();

	// The unique instance of the observer class:
	TMyExtraRenderingStuff   my_extra_rendering;

	// And start subscribing to the viewport events:
	opengl::COpenGLViewportPtr the_main_view = theScene->getViewport("main");
	my_extra_rendering.observeBegin( *the_main_view );


	// Modify the scene:
	// ------------------------------------------------------
	{
		opengl::CGridPlaneXYPtr obj = opengl::CGridPlaneXY::Create(-20,20,-20,20,0,1);
		obj->setColor(0.8,0.8,0.8);
		theScene->insert( obj );
	}

	theScene->insert( mrpt::opengl::stock_objects::CornerXYZ() );

	if (1)
	{
		opengl::CAxisPtr obj = opengl::CAxis::Create();
		obj->setFrequency(5);
		obj->enableTickMarks();
		obj->setAxisLimits(-10,-10,-10, 10,10,10);
		theScene->insert( obj );
	}

	{
		opengl::CSpherePtr obj = opengl::CSphere::Create();
		obj->setColor(0,0,1);
		obj->setRadius(0.3);
		obj->setLocation(0,0,1);
		obj->setName( "ball_1" );
		theScene->insert( obj );

		// And also let my rendering object access this ball's properties:
		my_extra_rendering.ball_obj = obj;
	}

	// IMPORTANT!!! IF NOT UNLOCKED, THE WINDOW WILL NOT BE UPDATED!
	win.unlockAccess3DScene();

	// Texts:
	win.addTextMessage(0.01,0.85, "This is a 2D message", TColorf(1,1,1),0,MRPT_GLUT_BITMAP_TIMES_ROMAN_10 );

	win.setCameraElevationDeg( 25.0f );
	//win.setCameraProjective(false);

	cout << endl;
	cout << "Control with mouse or keyboard. Valid keys:" << endl;
	cout << "  ESC                        -> Exit" << endl;
	cout << "  Left/right cursor arrow    -> Camera azimuth" << endl;
	cout << endl;

	bool end = false;

	CTicTac  timer;
	timer.Tic();

	while (!end && win.isOpen() )
	{
		// Move the scene:
		COpenGLScenePtr &theScene = win.get3DSceneAndLock();

		opengl::CRenderizablePtr obj1 = theScene->getByName("ball_1");
		const double t = timer.Tac();
		const double R = 8;
		const double W = 5.0, Q = 3.3;
		obj1->setLocation(
			R* cos(W*t)*sin(Q*t),
			R* sin(W*t),
			R* cos(W*t)*cos(Q*t) );


		// Update the texts on the gl display:
		win.addTextMessage(
			5,5,
			mrpt::format("FPS=%5.02f", win.getRenderingFPS() ),
			TColorf(1,1,1),0, MRPT_GLUT_BITMAP_HELVETICA_18 );

		// IMPORTANT!!! IF NOT UNLOCKED, THE WINDOW WILL NOT BE UPDATED!
		win.unlockAccess3DScene();

		// Update window:
		win.forceRepaint();
		mrpt::system::sleep(1);

		if (mrpt::system::os::kbhit()) end = true;
		if (win.keyHit())
		{
			mrptKeyModifier kmods;
			int key = win.getPushedKey(&kmods);
			//printf("Key pushed: %c (%i) - modifiers: 0x%04X\n",char(key),key,kmods);

			if (key==MRPTK_ESCAPE) end = true;

			if (key=='h' || key=='H')
			{
				if (!my_extra_rendering.showing_help)
				{
					my_extra_rendering.tim_show_start.Tic();
					my_extra_rendering.showing_help = true;
				}
				else
				{
					my_extra_rendering.tim_show_end.Tic();
					my_extra_rendering.showing_help = false;
					my_extra_rendering.hiding_help = true;
				}
			}

			if (key==MRPTK_RIGHT) win.setCameraAzimuthDeg( win.getCameraAzimuthDeg() + 5 );
			if (key==MRPTK_LEFT)  win.setCameraAzimuthDeg( win.getCameraAzimuthDeg() - 5 );
		}

	};
}
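A side note on the animation loop above: with R = 8, W = 5.0 and Q = 3.3 the ball follows the parametric curve

$$x(t) = R\cos(Wt)\sin(Qt),\qquad y(t) = R\sin(Wt),\qquad z(t) = R\cos(Wt)\cos(Qt)$$

and since $x^2 + y^2 + z^2 = R^2$ for every $t$, the ball stays on a sphere of radius R centred at the origin while tracing a Lissajous-like path over it.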
Example #19
0
File: test.cpp Project: afrancocf/mrpt
int main()
{
	try
	{
		CBoardSonars		sonarBoard;
		CObservationRange	obs;
		std::string			firmVers;
		CTicTac				tictac;

		CDisplayWindow3D	wind("Sonar representation");
		COpenGLScenePtr		&scene = wind.get3DSceneAndLock();

		scene->insert( mrpt::opengl::CGridPlaneXY::Create( -20,20,-20,20,0,1 ) );
		scene->insert( mrpt::opengl::stock_objects::RobotPioneer() );
		//scene->insert( mrpt::opengl::CCylinder::Create(1, 1, 2.0f) );
		wind.unlockAccess3DScene();

		// Load configuration:
		ASSERT_( mrpt::system::fileExists("CONFIG_sonars.ini") );
		CConfigFile conf("CONFIG_sonars.ini");
		sonarBoard.loadConfig( conf, "BOARD_SONAR_CONFIG");

		while ( !mrpt::system::os::kbhit() )
		{
			if (!sonarBoard.queryFirmwareVersion( firmVers ) )
			{
				cout << "Cannot connect to USB device... Retrying in 1 sec" << endl;
				mrpt::system::sleep(1000);
			}
			else
			{
				cout << "FIRMWARE VERSION: " << firmVers << endl;
				break;
			}
		}

		cout << "Select operation:" << endl;
		cout << " 1. Get measures from device" << endl;
		cout << " 2. Program a new I2C address to a single sonar" << endl;
		cout << "?";

		char c = os::getch();
		if (c=='1')
		{
			while ( !mrpt::system::os::kbhit() )
			{
				tictac.Tic();
				if (sonarBoard.getObservation( obs ))
				{
					double T = tictac.Tac();
					mrpt::system::clearConsole();

					printf("RX: %u ranges in %.03fms\n",(unsigned int)obs.sensedData.size(), T*1000);
					scene = wind.get3DSceneAndLock();
					for (size_t i=0;i<obs.sensedData.size();i++)
					{
						printf("[ID:%i]=%15f   0x%04X\n",obs.sensedData[i].sensorID,obs.sensedData[i].sensedDistance, (int)(100*obs.sensedData[i].sensedDistance) );

						// Show the distances
						std::string obj = format("sonar%i",obs.sensedData[i].sensorID);
						mrpt::opengl::CCylinderPtr sonarRange;
						mrpt::opengl::CRenderizablePtr objPtr = scene->getByName( obj );
						if( !objPtr )
						{
							sonarRange = mrpt::opengl::CCylinder::Create(0.0f,0.0f,1.0f,30,10);
							sonarRange->setName( obj );
							scene->insert( sonarRange );
						}
						else
							sonarRange = CCylinderPtr( objPtr );

						sonarRange->setRadii( 0, tan( obs.sensorConeApperture )*obs.sensedData[i].sensedDistance );
						sonarRange->setPose( CPose3D(obs.sensedData[i].sensorPose)+CPose3D(0,0,0,0,DEG2RAD(90.0),0) );
						sonarRange->setHeight( obs.sensedData[i].sensedDistance );
						sonarRange->enableShowName();
						sonarRange->setColor( 0, 0, 1, 0.25 );

					}
					wind.unlockAccess3DScene();
					wind.repaint();
				}
				else
				{
					cerr << "Error rx..." << endl;
					//return -1;
				}

				mrpt::system::sleep(200);
			}
		}
		else
		if (c=='2')
		{
			int		curAddr,newAddr;
			cout << "Enter current address: (decimal, 0 to 15)" << endl;
			if (1==scanf("%i",&curAddr))
			{
				cout << "Enter new address: (decimal, 0 to 15)" << endl;
				if (1==scanf("%i",&newAddr))
				{
					ASSERT_(curAddr>=0 && curAddr<16);
					ASSERT_(newAddr>=0 && newAddr<16);
					printf("Changing address %i --> %i... ",curAddr,newAddr);

					if (sonarBoard.programI2CAddress(curAddr,newAddr) )
							printf(" DONE!\n");
					else	printf(" ERROR!\n");
				}
			}
		}
	}
	catch(std::exception &e)
	{
		cerr << e.what() << endl;
		return -1;
	}

	return 0;
}
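About the range visualization in option '1' above: each reading with sensed distance d is drawn as a cone, i.e. a CCylinder with zero radius at the sensor end, height equal to d and far-end radius

$$r = d\,\tan(\alpha),\qquad \alpha = \texttt{obs.sensorConeApperture},$$

posed at the sensor pose composed with a +90° pitch so that the cylinder's local Z axis ends up along the sensor's forward direction.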
Example #20
0
File: test.cpp Project: astoeckel/mrpt
// ------------------------------------------------------
//				TestDijkstra
// ------------------------------------------------------
void TestDijkstra()
{
	CTicTac tictac;
	CNetworkOfPoses2D	graph_links;
	CNetworkOfPoses2D::global_poses_t optimal_poses, optimal_poses_dijkstra;
	aligned_containers<TNodeID,CPose2D>::map_t  real_poses;

	randomGenerator.randomize(10);

	// ----------------------------
	// Create a random graph:
	// ----------------------------
	const size_t N_VERTEX = 20;
	const double DIST_THRES = 10;
	const double NODES_XY_MAX = 15;

	vector<float>  xs,ys;

	for (size_t j=0;j<N_VERTEX;j++)
	{
		CPose2D p(
			randomGenerator.drawUniform(-NODES_XY_MAX,NODES_XY_MAX),
			randomGenerator.drawUniform(-NODES_XY_MAX,NODES_XY_MAX),
			randomGenerator.drawUniform(-M_PI,M_PI) );
		real_poses[j] = p;

		// for the figure:
		xs.push_back(p.x());
		ys.push_back(p.y());
	}

	// Add some edges
	for (size_t i=0;i<N_VERTEX;i++)
	{
		for (size_t j=0;j<N_VERTEX;j++)
		{
			if (i==j) continue;
			if ( real_poses[i].distanceTo(real_poses[j]) < DIST_THRES )
				addEdge(i,j,real_poses,graph_links);
		}
	}

	// ----------------------------
	//  Dijkstra
	// ----------------------------
	tictac.Tic();
	const size_t SOURCE_NODE = 0;

	CMyDijkstra  myDijkstra(
		graph_links,
		SOURCE_NODE,
		&myDijkstraWeight);

	cout << "Dijkstra took " << tictac.Tac()*1e3 << " ms for " << graph_links.edges.size() << " edges." << endl;

	// Demo of getting the tree representation of
	//  the graph & visit its nodes:
	// ---------------------------------------------------------
	CMyDijkstra::tree_graph_t   graphAsTree;
	myDijkstra.getTreeGraph(graphAsTree);

	// Text representation of the tree:
	cout << "TREE:\n" << graphAsTree.getAsTextDescription() << endl;

	struct CMyVisitor : public CMyDijkstra::tree_graph_t::Visitor
	{
		virtual void OnVisitNode( const TNodeID parent, const CMyDijkstra::tree_graph_t::TEdgeInfo &edge_to_child, const size_t depth_level )
		{
			cout << string(depth_level*3, ' ');
			cout << edge_to_child.id << endl;
		}
	};

	CMyVisitor myVisitor;

	cout << "Depth-first traverse of graph:\n";
	cout << SOURCE_NODE << endl;
	graphAsTree.visitDepthFirst( SOURCE_NODE, myVisitor );

	cout << endl << "Breadth-first traverse of graph:\n";
	cout << SOURCE_NODE << endl;
	graphAsTree.visitBreadthFirst( SOURCE_NODE, myVisitor );



	// ----------------------------
	// Display results graphically:
	// ----------------------------
	CDisplayWindowPlots	win("Dijkstra example");

	win.hold_on();
	win.axis_equal();

	for (TNodeID i=0;i<N_VERTEX && win.isOpen() ;i++)
	{
		if (i==SOURCE_NODE) continue;

		CMyDijkstra::edge_list_t	  path;

		myDijkstra.getShortestPathTo(i,path);

		cout << "to " << i << " -> #steps= " << path.size() << endl;

		win.setWindowTitle(format("Dijkstra path %u->%u",static_cast<unsigned int>(SOURCE_NODE),static_cast<unsigned int>(i) ));

		win.clf();

		// plot all edges:
		for (CNetworkOfPoses2D::iterator e= graph_links.begin();e!=graph_links.end();++e)
		{
			const CPose2D &p1 = real_poses[ e->first.first ];
			const CPose2D &p2 = real_poses[ e->first.second ];

			vector<float> X(2);
			vector<float> Y(2);
			X[0] = p1.x();  Y[0] = p1.y();
			X[1] = p2.x();  Y[1] = p2.y();
			win.plot(X,Y,"k1");
		}

		// Draw the shortest path:
		for (CMyDijkstra::edge_list_t::const_iterator a=path.begin();a!=path.end();++a)
		{
			const CPose2D &p1 = real_poses[ a->first];
			const CPose2D &p2 = real_poses[ a->second ];

			vector<float> X(2);
			vector<float> Y(2);
			X[0] = p1.x();  Y[0] = p1.y();
			X[1] = p2.x();  Y[1] = p2.y();
			win.plot(X,Y,"g3");
		}

		// Draw All nodes:
		win.plot(xs,ys,".b7");
		win.axis_fit(true);

		cout << "Press any key to show next shortest path, close window to end...\n";
		win.waitForKey();
	}

	win.clear();
}
Example #21
/************************************************************************************************
*							extractFeaturesKLT
************************************************************************************************/
void CFeatureExtraction::extractFeaturesKLT(
		const mrpt::utils::CImage			&inImg,
		CFeatureList			&feats,
		unsigned int			init_ID,
		unsigned int			nDesiredFeatures,
		const TImageROI			&ROI) const
{
//#define VERBOSE_TIMING

#ifdef VERBOSE_TIMING
	CTicTac tictac;
#endif
		MRPT_START

		#if MRPT_HAS_OPENCV
        const unsigned int MAX_COUNT = 300;

		// -----------------------------------------------------------------
		// Create OpenCV Local Variables
		// -----------------------------------------------------------------
		int				count = 0;
		int				nPts;

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif
		const cv::Mat img( cv::cvarrToMat( inImg.getAs<IplImage>() ) );

#ifdef VERBOSE_TIMING
		cout << "[KLT] Attach: " << tictac.Tac()*1000.0f << endl;
#endif
		const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );
		const cv::Mat cGrey( cv::cvarrToMat( inImg_gray.getAs<IplImage>() ) );

		nPts = (nDesiredFeatures <= 0) ? MAX_COUNT : nDesiredFeatures;

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif

#ifdef VERBOSE_TIMING
		cout << "[KLT] Create: " << tictac.Tac()*1000.0f << endl;
#endif
		count = nPts;										// Number of points to find

		// -----------------------------------------------------------------
		// Select good features with subpixel accuracy (USING HARRIS OR KLT)
		// -----------------------------------------------------------------
		const bool use_harris = ( options.featsType == featHarris );

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif
		std::vector<cv::Point2f> points;
		cv::goodFeaturesToTrack(
			cGrey,points, nPts, 
			(double)options.harrisOptions.threshold,    // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
			(double)options.harrisOptions.min_distance, // minimum distance between features
			cv::noArray(), // mask
			3, // blocksize
			use_harris, /* harris */
			options.harrisOptions.k 
			);
		count = (int)points.size();	// Actual number of features found
#ifdef VERBOSE_TIMING
		cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif

		if( nDesiredFeatures > 0 && count < nPts )
			cout << "\n[WARNING][selectGoodFeaturesKLT]: Only " << count << " of " << nDesiredFeatures << " points could be extracted in the image." << endl;

		if( options.FIND_SUBPIXEL )
		{
#ifdef VERBOSE_TIMING
			tictac.Tic();
#endif
			// Subpixel interpolation
			cv::cornerSubPix(cGrey,points,
				cv::Size(3,3), cv::Size(-1,-1),
				cv::TermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.05 ));

#ifdef VERBOSE_TIMING
			cout << "[KLT] subpixel: " << tictac.Tac()*1000.0f << endl;
#endif
		}

		// -----------------------------------------------------------------
		// Fill output structure
		// -----------------------------------------------------------------
#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif

		feats.clear();
		unsigned int	borderFeats = 0;
		unsigned int	nCFeats		= init_ID;
		int				i			= 0;
		const int		limit		= min( nPts, count );
		int				offset		= (int)this->options.patchSize/2 + 1;
		unsigned int	imgH		= inImg.getHeight();
		unsigned int	imgW		= inImg.getWidth();

		while( i < limit )
		{
			const int xBorderInf = (int)floor( points[i].x - options.patchSize/2 );
			const int xBorderSup = (int)floor( points[i].x + options.patchSize/2 );
			const int yBorderInf = (int)floor( points[i].y - options.patchSize/2 );
			const int yBorderSup = (int)floor( points[i].y + options.patchSize/2 );

			if( options.patchSize==0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
			{
				CFeaturePtr ft = CFeature::Create();

				ft->type		= featKLT;
				ft->x			= points[i].x;				// X position
				ft->y			= points[i].y;				// Y position
				ft->track_status = status_TRACKED;		    // Feature Status
				ft->response	= 0.0;						// A value proportional to the quality of the feature (unused yet)
				ft->ID			= nCFeats++;				// Feature ID into extraction
				ft->patchSize	= options.patchSize;		// The size of the feature patch

				if( options.patchSize > 0 )
				{
					inImg.extract_patch(
						ft->patch,
						round( ft->x ) - offset,
						round( ft->y ) - offset,
						options.patchSize,
						options.patchSize );				// Image patch surrounding the feature
				}

				feats.push_back( ft );

			} // end if
			else
				borderFeats++;

			i++;
		} // end while

#ifdef VERBOSE_TIMING
		cout << "[KLT] Create output: " << tictac.Tac()*1000.0f << endl;
#endif


		#else
			THROW_EXCEPTION("The MRPT has been compiled with MRPT_HAS_OPENCV=0 !");
		#endif

		MRPT_END

} // end of function
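For reference, a minimal sketch of how the options consumed by extractFeaturesKLT() could be set from user code. The option fields (featsType, patchSize, FIND_SUBPIXEL, harrisOptions.threshold / .min_distance) are the ones read above; the numeric values and the helper function itself are illustrative assumptions, not library defaults:

#include <mrpt/utils/CImage.h>
#include <mrpt/vision/CFeatureExtraction.h>

// Illustrative helper: detect KLT corners with sub-pixel refinement.
void detectKLT(const mrpt::utils::CImage& img, mrpt::vision::CFeatureList& feats)
{
	mrpt::vision::CFeatureExtraction fExt;
	fExt.options.featsType = mrpt::vision::featKLT;
	fExt.options.patchSize = 11;                     // keep an 11x11 patch per feature
	fExt.options.FIND_SUBPIXEL = true;               // run the cv::cornerSubPix() refinement
	fExt.options.harrisOptions.threshold = 0.005f;   // relative quality level
	fExt.options.harrisOptions.min_distance = 5.0f;  // min. distance between corners [px]
	fExt.detectFeatures(img, feats, 0 /*init_ID*/, 200 /*nDesiredFeatures*/);
}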
Example #22
0
File: tracker.cpp Project: EduFdez/mrpt
/************************************************************************************************
 *						Track Them All tracker
 ************************************************************************************************/
cv::Mat Tracker::trackThemAll(
	vector<string> files_fullpath_tracking, int tracking_image_counter,
	int remove_lost_feats, int add_new_feats, int max_feats, int patch_size,
	int window_width, int window_height)
{
	tracker->enableTimeLogger(true);  // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// -------------------------------------------------------------
	// To see all the existing params and documentation, see
	// mrpt::vision::CGenericFeatureTracker
	tracker->extra_params["remove_lost_features"] =
		remove_lost_feats;  //;1;   // automatically remove out-of-image and
	// badly tracked features

	tracker->extra_params["add_new_features"] =
		add_new_feats;  // 1;   // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"] = 32;
	tracker->extra_params["minimum_KLT_response_to_add"] = 10;
	tracker->extra_params["add_new_feat_max_features"] = max_feats;  // 350;
	tracker->extra_params["add_new_feat_patch_size"] = patch_size;  // 11;

	tracker->extra_params["update_patches_every"] = 0;  // Don't update patches.

	tracker->extra_params["check_KLT_response_every"] =
		5;  // Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"] = 5;

	// Specific params for "CFeatureTracker_KL"
	// ------------------------------------------------------
	tracker->extra_params["window_width"] = window_width;  // 5;
	tracker->extra_params["window_height"] = window_height;  // 5;
	// tracker->extra_params["LK_levels"] = 3;
	// tracker->extra_params["LK_max_iters"] = 10;
	// tracker->extra_params["LK_epsilon"] = 0.1;
	// tracker->extra_params["LK_max_tracking_error"] = 150;

	long current_num = tracking_image_counter % files_fullpath_tracking.size();
	CImage theImg;  // The grabbed image:
	theImg.loadFromFile(files_fullpath_tracking.at(current_num));

	// Take the resolution upon first valid frame.
	if (!hasResolution)
	{
		hasResolution = true;
		// cameraParams.scaleToResolution()...
		cameraParams.ncols = theImg.getWidth();
		cameraParams.nrows = theImg.getHeight();
	}

	// Do tracking:
	if (step_num > 1)  // we need "previous_image" to be valid.
	{
		// This single call makes: detection, tracking, recalculation of
		// KLT_response, etc.
		tracker->trackFeatures(previous_image, theImg, trackedFeats);
	}

	// Save the image for the next step:
	previous_image = theImg;

	// Save history of feature observations:
	tracker->getProfiler().enter("Save history");

	for (size_t i = 0; i < trackedFeats.size(); ++i)
	{
		TSimpleFeature& f = trackedFeats[i];

		const TPixelCoordf pxRaw(f.pt.x, f.pt.y);
		TPixelCoordf pxUndist;
		// mrpt::vision::pinhole::undistort_point(pxRaw,pxUndist, cameraParams);
		pxUndist = pxRaw;

		feat_track_history.push_back(
			TFeatureObservation(f.ID, curCamPoseId, pxUndist));
	}
	curCamPoseId++;

	tracker->getProfiler().leave("Save history");

	// now that we're done with the image, we can directly write onto it
	//  for the display
	// ----------------------------------------------------------------
	if (DO_HIST_EQUALIZE_IN_GRAYSCALE && !theImg.isColor())
		theImg.equalizeHistInPlace();
	// Convert to color so we can draw color marks, etc.
	theImg.colorImageInPlace();

	{  // FPS:
		static CTicTac tictac;
		// const double T = tictac.Tac();
		tictac.Tic();
		// const double fps = 1.0 / (std::max(1e-5, T));
		// theImg.filledRectangle(1,1,175,25,TColor(0,0,0));

		// const int current_adapt_thres =
		tracker->getDetectorAdaptiveThreshold();
	}

	// Draw feature tracks
	if (SHOW_FEAT_TRACKS)
	{
		// Update new feature coords:
		tracker->getProfiler().enter("drawFeatureTracks");

		std::set<TFeatureID> observed_IDs;

		// cout << "tracked feats size" << trackedFeats.size() << endl;
		for (size_t i = 0; i < trackedFeats.size(); ++i)
		{
			const TSimpleFeature& ft = trackedFeats[i];
			std::list<TPixelCoord>& seq = feat_tracks[ft.ID];

			// drawMarker(cvImg1, Point(trackedFeats.getFeatureX(i),
			// trackedFeats.getFeatureY(i)),  Scalar(0, 255, 0), MARKER_CROSS,
			// CROSS_SIZE, CROSS_THICKNESS);

			observed_IDs.insert(ft.ID);

			if (seq.size() >= FEATS_TRACK_LEN) seq.erase(seq.begin());
			seq.push_back(ft.pt);

			// Draw:
			if (seq.size() > 1)
			{
				const std::list<TPixelCoord>::const_iterator it_end = seq.end();

				std::list<TPixelCoord>::const_iterator it = seq.begin();
				std::list<TPixelCoord>::const_iterator it_prev = it++;

				for (; it != it_end; ++it)
				{
					theImg.line(
						it_prev->x, it_prev->y, it->x, it->y,
						TColor(190, 190, 190));
					it_prev = it;
				}
			}
		}

		tracker->getProfiler().leave("drawFeatureTracks");

		// Purge old data:
		for (std::map<TFeatureID, std::list<TPixelCoord>>::iterator it =
				 feat_tracks.begin();
			 it != feat_tracks.end();)
		{
			if (observed_IDs.find(it->first) == observed_IDs.end())
			{
				std::map<TFeatureID, std::list<TPixelCoord>>::iterator next_it =
					it;
				next_it++;
				feat_tracks.erase(it);
				it = next_it;
			}
			else
				++it;
		}
	}

	// Draw Tracked feats:
	{
		theImg.selectTextFont("5x7");
		tracker->getProfiler().enter("drawFeatures");
		theImg.drawFeatures(
			trackedFeats, TColor(0, 0, 255), SHOW_FEAT_IDS, SHOW_RESPONSES);
		tracker->getProfiler().leave("drawFeatures");
	}
	step_num++;

	// converting the cv::Mat to a QImage and changing the resolution of the
	// output images
	cv::Mat cvImg1 = cv::cvarrToMat(theImg.getAs<IplImage>());

	return cvImg1;
}
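The `tracker` member used throughout trackThemAll() is assumed to be created elsewhere in this project. A hedged sketch of a typical initialization with the MRPT 1.x tracking API (CFeatureTracker_KL is the pyramidal Lucas-Kanade implementation; the parameter values are illustrative):

#include <mrpt/vision/tracking.h>

using namespace mrpt::vision;

// Illustrative: a KLT-based generic tracker, configured as the code above expects.
CGenericFeatureTrackerAutoPtr tracker(new CFeatureTracker_KL);

void initTracker()
{
	tracker->enableTimeLogger(true);
	tracker->extra_params["add_new_features"] = 1;
	tracker->extra_params["remove_lost_features"] = 1;
	tracker->extra_params["window_width"] = 5;
	tracker->extra_params["window_height"] = 5;
}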
Example #23
0
/*---------------------------------------------------------------
					AlignPDF_robustMatch
---------------------------------------------------------------*/
CPosePDF::Ptr CGridMapAligner::AlignPDF_robustMatch(
	const mrpt::maps::CMetricMap* mm1, const mrpt::maps::CMetricMap* mm2,
	const CPosePDFGaussian& initialEstimationPDF, float* runningTime,
	void* info)
{
	MRPT_START

	ASSERT_(
		options.methodSelection == CGridMapAligner::amRobustMatch ||
		options.methodSelection == CGridMapAligner::amModifiedRANSAC);

	TReturnInfo outInfo;
	mrpt::tfest::TMatchingPairList& correspondences =
		outInfo.correspondences;  // Use this placeholder directly to save 1 variable & 1 copy.
	mrpt::tfest::TMatchingPairList largestConsensusCorrs;

	CTicTac* tictac = nullptr;

	CPose2D grossEst = initialEstimationPDF.mean;

	CLandmarksMap::Ptr lm1(new CLandmarksMap());
	CLandmarksMap::Ptr lm2(new CLandmarksMap());

	std::vector<size_t> idxs1, idxs2;
	std::vector<size_t>::iterator it1, it2;

	// Asserts:
	// -----------------
	const CMultiMetricMap* multimap1 = nullptr;
	const CMultiMetricMap* multimap2 = nullptr;
	const COccupancyGridMap2D* m1 = nullptr;
	const COccupancyGridMap2D* m2 = nullptr;

	if (IS_CLASS(mm1, CMultiMetricMap) && IS_CLASS(mm2, CMultiMetricMap))
	{
		multimap1 = static_cast<const CMultiMetricMap*>(mm1);
		multimap2 = static_cast<const CMultiMetricMap*>(mm2);

		ASSERT_(multimap1->m_gridMaps.size() && multimap1->m_gridMaps[0]);
		ASSERT_(multimap2->m_gridMaps.size() && multimap2->m_gridMaps[0]);

		m1 = multimap1->m_gridMaps[0].get();
		m2 = multimap2->m_gridMaps[0].get();
	}
	else if (
		IS_CLASS(mm1, COccupancyGridMap2D) &&
		IS_CLASS(mm2, COccupancyGridMap2D))
	{
		m1 = static_cast<const COccupancyGridMap2D*>(mm1);
		m2 = static_cast<const COccupancyGridMap2D*>(mm2);
	}
	else
		THROW_EXCEPTION(
			"Metric maps must be of classes COccupancyGridMap2D or "
			"CMultiMetricMap");

	ASSERTMSG_(
		m1->getResolution() == m2->getResolution(),
		mrpt::format(
			"Different resolutions for m1, m2:\n"
			"\tres(m1) = %f\n\tres(m2) = %f\n",
			m1->getResolution(), m2->getResolution()));

	// The algorithm output auxiliar info:
	// -------------------------------------------------
	outInfo.goodness = 1.0f;

	outInfo.landmarks_map1 = lm1;
	outInfo.landmarks_map2 = lm2;

	// The PDF to estimate:
	// ------------------------------------------------------
	CPosePDFSOG::Ptr pdf_SOG = mrpt::make_aligned_shared<CPosePDFSOG>();

	// Extract features from grid-maps:
	// ------------------------------------------------------
	const size_t N1 =
		std::max(40, mrpt::round(m1->getArea() * options.featsPerSquareMeter));
	const size_t N2 =
		std::max(40, mrpt::round(m2->getArea() * options.featsPerSquareMeter));

	m_grid_feat_extr.extractFeatures(
		*m1, *lm1, N1, options.feature_descriptor,
		options.feature_detector_options);
	m_grid_feat_extr.extractFeatures(
		*m2, *lm2, N2, options.feature_descriptor,
		options.feature_detector_options);

	if (runningTime)
	{
		tictac = new CTicTac();
		tictac->Tic();
	}

	const size_t nLM1 = lm1->size();
	const size_t nLM2 = lm2->size();

	//  At least two landmarks in each map!
	// ------------------------------------------------------
	if (nLM1 < 2 || nLM2 < 2)
	{
		outInfo.goodness = 0;
	}
	else
	{
		//#define METHOD_FFT
		//#define DEBUG_SHOW_CORRELATIONS

		// Compute correlation between landmarks:
		// ---------------------------------------------
		CMatrixFloat CORR(lm1->size(), lm2->size()), auxCorr;
		CImage im1, im2;  // Grayscale
		CVectorFloat corr;
		unsigned int corrsCount = 0;
		std::vector<bool> hasCorr(nLM1, false);

		const double thres_max = options.threshold_max;
		const double thres_delta = options.threshold_delta;

		// CDisplayWindowPlots::Ptr	auxWin;
		if (options.debug_show_corrs)
		{
			// auxWin = CDisplayWindowPlots::Ptr( new
			// CDisplayWindowPlots("Individual corr.") );
			std::cerr << "Warning: options.debug_show_corrs has no effect "
						 "since MRPT 0.9.1\n";
		}

		for (size_t idx1 = 0; idx1 < nLM1; idx1++)
		{
			// CVectorFloat  	corrs_indiv;
			vector<pair<size_t, float>> corrs_indiv;  // (index, distance); the index is used
			// to recover the original position after sorting.
			vector<float> corrs_indiv_only;
			corrs_indiv.reserve(nLM2);
			corrs_indiv_only.reserve(nLM2);

			for (size_t idx2 = 0; idx2 < nLM2; idx2++)
			{
				float minDist;
				minDist =
					lm1->landmarks.get(idx1)->features[0]->descriptorDistanceTo(
						*lm2->landmarks.get(idx2)->features[0]);

				corrs_indiv.emplace_back(idx2, minDist);
				corrs_indiv_only.push_back(minDist);
			}  // end for idx2

			// double corr_mean,corr_std;
			// mrpt::math::meanAndStd(corrs_indiv_only,corr_mean,corr_std);
			const double corr_best = mrpt::math::minimum(corrs_indiv_only);
			// cout << "M: " << corr_mean << " std: " << corr_std << " best: "
			// << corr_best << endl;

			// Sort the list and keep the N best features:
			std::sort(corrs_indiv.begin(), corrs_indiv.end(), myVectorOrder);

			// const size_t nBestToKeep = std::min( (size_t)30,
			// corrs_indiv.size() );
			const size_t nBestToKeep = corrs_indiv.size();

			for (size_t w = 0; w < nBestToKeep; w++)
			{
				if (corrs_indiv[w].second <= thres_max &&
					corrs_indiv[w].second <= (corr_best + thres_delta))
				{
					idxs1.push_back(idx1);
					idxs2.push_back(corrs_indiv[w].first);
					outInfo.correspondences_dists_maha.emplace_back(
						idx1, corrs_indiv[w].first, corrs_indiv[w].second);
				}
			}
		}  // end for idx1

		// Save an image with each feature and its matches:
		if (options.save_feat_coors)
		{
			mrpt::system::deleteFilesInDirectory("grid_feats");
			mrpt::system::createDirectory("grid_feats");

			std::map<size_t, std::set<size_t>> corrs_by_idx;
			for (size_t l = 0; l < idxs1.size(); l++)
				corrs_by_idx[idxs1[l]].insert(idxs2[l]);

			for (auto& it : corrs_by_idx)
			{
				CMatrixFloat descriptor1;
				lm1->landmarks.get(it.first)
					->features[0]
					->getFirstDescriptorAsMatrix(descriptor1);

				im1 = CImage(descriptor1, true);

				const size_t FEAT_W = im1.getWidth();
				const size_t FEAT_H = im1.getHeight();
				const size_t nF = it.second.size();

				CImage img_compose(FEAT_W * 2 + 15, 10 + (5 + FEAT_H) * nF);
				img_compose.filledRectangle(
					0, 0, img_compose.getWidth() - 1,
					img_compose.getHeight() - 1, TColor::black());

				img_compose.drawImage(5, 5, im1);

				size_t j;
				std::set<size_t>::iterator it_j;
				string fil =
					format("grid_feats/_FEAT_MATCH_%03i", (int)it.first);

				for (j = 0, it_j = it.second.begin(); j < nF; ++j, ++it_j)
				{
					fil += format("_%u", static_cast<unsigned int>(*it_j));

					CMatrixFloat descriptor2;
					lm2->landmarks.get(*it_j)
						->features[0]
						->getFirstDescriptorAsMatrix(descriptor2);
					im2 = CImage(descriptor2, true);
					img_compose.drawImage(
						10 + FEAT_W, 5 + j * (FEAT_H + 5), im2);
				}
				fil += ".png";
				img_compose.saveToFile(fil);
			}  // end for map
		}

		// ------------------------------------------------------------------
		// Create the list of correspondences from the lists: idxs1 & idxs2
		// ------------------------------------------------------------------
		correspondences.clear();
		for (it1 = idxs1.begin(), it2 = idxs2.begin(); it1 != idxs1.end();
			 ++it1, ++it2)
		{
			mrpt::tfest::TMatchingPair mp;
			mp.this_idx = *it1;
			mp.this_x = lm1->landmarks.get(*it1)->pose_mean.x;
			mp.this_y = lm1->landmarks.get(*it1)->pose_mean.y;
			mp.this_z = lm1->landmarks.get(*it1)->pose_mean.z;

			mp.other_idx = *it2;
			mp.other_x = lm2->landmarks.get(*it2)->pose_mean.x;
			mp.other_y = lm2->landmarks.get(*it2)->pose_mean.y;
			mp.other_z = lm2->landmarks.get(*it2)->pose_mean.z;
			correspondences.push_back(mp);

			if (!hasCorr[*it1])
			{
				hasCorr[*it1] = true;
				corrsCount++;
			}
		}  // end for corres.

		outInfo.goodness = (2.0f * corrsCount) / (nLM1 + nLM2);

		// Compute the estimation using ALL the correspondences (NO ROBUST):
		// ----------------------------------------------------------------------
		mrpt::math::TPose2D noRobustEst;
		if (!mrpt::tfest::se2_l2(correspondences, noRobustEst))
		{
			// There's no way to match the maps! e.g. no correspondences
			outInfo.goodness = 0;
			pdf_SOG->clear();
			outInfo.sog1 = pdf_SOG;
			outInfo.sog2 = pdf_SOG;
			outInfo.sog3 = pdf_SOG;
		}
		else
		{
			outInfo.noRobustEstimation = mrpt::poses::CPose2D(noRobustEst);
			MRPT_LOG_INFO(mrpt::format(
				"[CGridMapAligner] Overall estimation(%u corrs, total: "
				"%u): (%.03f,%.03f,%.03fdeg)\n",
				corrsCount, (unsigned)correspondences.size(),
				outInfo.noRobustEstimation.x(), outInfo.noRobustEstimation.y(),
				RAD2DEG(outInfo.noRobustEstimation.phi())));

			// The list of SOG modes & their corresponding sub-sets of
			// matchings:
			using TMapMatchingsToPoseMode = mrpt::aligned_std_map<
				mrpt::tfest::TMatchingPairList, CPosePDFSOG::TGaussianMode>;
			TMapMatchingsToPoseMode sog_modes;

			// ---------------------------------------------------------------
			// Now, we have to choose between the methods:
			//  - CGridMapAligner::amRobustMatch  ("normal" RANSAC)
			//  - CGridMapAligner::amModifiedRANSAC
			// ---------------------------------------------------------------
			if (options.methodSelection == CGridMapAligner::amRobustMatch)
			{
				// ====================================================
				//             METHOD: "Normal" RANSAC
				// ====================================================

				// RANSAC over the found correspondences:
				// -------------------------------------------------
				const unsigned int min_inliers =
					options.ransac_minSetSizeRatio * (nLM1 + nLM2) / 2;

				mrpt::tfest::TSE2RobustParams tfest_params;
				tfest_params.ransac_minSetSize = min_inliers;
				tfest_params.ransac_maxSetSize = nLM1 + nLM2;
				tfest_params.ransac_mahalanobisDistanceThreshold =
					options.ransac_mahalanobisDistanceThreshold;
				tfest_params.ransac_nSimulations = 0;  // 0=auto
				tfest_params.ransac_fuseByCorrsMatch = true;
				tfest_params.ransac_fuseMaxDiffXY = 0.01;
				tfest_params.ransac_fuseMaxDiffPhi = DEG2RAD(0.1);
				tfest_params.ransac_algorithmForLandmarks = true;
				tfest_params.probability_find_good_model =
					options.ransac_prob_good_inliers;
				tfest_params.verbose = false;

				mrpt::tfest::TSE2RobustResult tfest_result;
				mrpt::tfest::se2_l2_robust(
					correspondences, options.ransac_SOG_sigma_m, tfest_params,
					tfest_result);

				ASSERT_(pdf_SOG);
				*pdf_SOG = tfest_result.transformation;
				largestConsensusCorrs = tfest_result.largestSubSet;

				// Simplify the SOG by merging close modes:
				// -------------------------------------------------
				size_t nB = pdf_SOG->size();
				outInfo.sog1 = pdf_SOG;

				// Keep only the most-likely Gaussian mode:
				CPosePDFSOG::TGaussianMode best_mode;
				best_mode.log_w = -std::numeric_limits<double>::max();

				for (size_t n = 0; n < pdf_SOG->size(); n++)
				{
					const CPosePDFSOG::TGaussianMode& m = (*pdf_SOG)[n];

					if (m.log_w > best_mode.log_w) best_mode = m;
				}

				pdf_SOG->clear();
				pdf_SOG->push_back(best_mode);
				outInfo.sog2 = pdf_SOG;

				MRPT_LOG_INFO_STREAM(
					"[CGridMapAligner] amRobustMatch: "
					<< nB << " SOG modes reduced to " << pdf_SOG->size()
					<< " (most-likely) (min.inliers=" << min_inliers << ")\n");

			}  // end of amRobustMatch
			else
			{
				// ====================================================
				//             METHOD: "Modified" RANSAC
				// ====================================================
				mrpt::tfest::TMatchingPairList all_corrs = correspondences;

				const size_t nCorrs = all_corrs.size();
				ASSERT_(nCorrs >= 2);

				pdf_SOG->clear();  // Start with 0 Gaussian modes

				// Build a points-map for exploiting its KD-tree:
				// ----------------------------------------------------
				CSimplePointsMap lm1_pnts, lm2_pnts;
				lm1_pnts.reserve(nLM1);
				for (size_t i = 0; i < nLM1; i++)
					lm1_pnts.insertPoint(lm1->landmarks.get(i)->pose_mean);
				lm2_pnts.reserve(nLM2);
				for (size_t i = 0; i < nLM2; i++)
					lm2_pnts.insertPoint(lm2->landmarks.get(i)->pose_mean);

				// RANSAC loop
				// ---------------------
				const size_t minInliersTOaccept =
					round(options.ransac_minSetSizeRatio * 0.5 * (nLM1 + nLM2));
				// Set an initial # of iterations:
				const unsigned int ransac_min_nSimulations =
					2 * (nLM1 + nLM2);  // 1000;
				unsigned int ransac_nSimulations =
					10;  // The initial value doesn't matter; it's recomputed in the first loop iteration.
				const double probability_find_good_model = 0.9999;

				const double chi2_thres_dim1 =
					mrpt::math::chi2inv(options.ransac_chi2_quantile, 1);
				const double chi2_thres_dim2 =
					mrpt::math::chi2inv(options.ransac_chi2_quantile, 2);

				// Generic 2x2 covariance matrix for all features in their local
				// coords:
				CMatrixDouble22 COV_pnt;
				COV_pnt.get_unsafe(0, 0) = COV_pnt.get_unsafe(1, 1) =
					square(options.ransac_SOG_sigma_m);

				// The absolute number of trials.
				// in practice it's only important for a reduced number of
				// correspondences, to avoid a dead-lock:
				//  It's the binomial coefficient:
				//   / n \      n!          n * (n-1)
				//   |   | = ----------- = -----------
				//   \ 2 /    2! (n-2)!         2
				//
				const unsigned int max_trials =
					(nCorrs * (nCorrs - 1) / 2) *
					5;  // "*5" is just for safety...

				unsigned int iter = 0;  // Valid iterations (those passing the first Mahalanobis test)
				unsigned int trials = 0;  // Counter of all iterations, including "iter" + failing ones.
				while (iter < ransac_nSimulations &&
					   trials <
						   max_trials)  // ransac_nSimulations can be dynamic
				{
					trials++;

					mrpt::tfest::TMatchingPairList tentativeSubSet;

					// Pick 2 random correspondences:
					uint32_t idx1, idx2;
					idx1 = getRandomGenerator().drawUniform32bit() % nCorrs;
					do
					{
						idx2 = getRandomGenerator().drawUniform32bit() % nCorrs;
					} while (idx1 == idx2);  // Avoid a degenerated case!

					// Uniqueness of features:
					if (all_corrs[idx1].this_idx == all_corrs[idx2].this_idx ||
						all_corrs[idx1].this_idx == all_corrs[idx2].other_idx)
						continue;
					if (all_corrs[idx1].other_idx == all_corrs[idx2].this_idx ||
						all_corrs[idx1].other_idx == all_corrs[idx2].other_idx)
						continue;

					// Check the feasibility of this pair "idx1"-"idx2":
					//  The distance between the pair of points in MAP1 must be
					//  very close
					//   to that of their correspondences in MAP2:
					const double corrs_dist1 =
						mrpt::math::distanceBetweenPoints(
							all_corrs[idx1].this_x, all_corrs[idx1].this_y,
							all_corrs[idx1].this_z, all_corrs[idx2].this_x,
							all_corrs[idx2].this_y, all_corrs[idx2].this_z);

					const double corrs_dist2 =
						mrpt::math::distanceBetweenPoints(
							all_corrs[idx1].other_x, all_corrs[idx1].other_y,
							all_corrs[idx1].other_z, all_corrs[idx2].other_x,
							all_corrs[idx2].other_y, all_corrs[idx2].other_z);

					// Is this a consistent possibility?
					//  We use a chi2 test (see paper for the derivation)
					const double corrs_dist_chi2 =
						square(square(corrs_dist1) - square(corrs_dist2)) /
						(8.0 * square(options.ransac_SOG_sigma_m) *
						 (square(corrs_dist1) + square(corrs_dist2)));

					if (corrs_dist_chi2 > chi2_thres_dim1) continue;  // Nope

					iter++;  // Do not count iterations that fail the test above.

					// before proceeding with this hypothesis, is it an old one?
					bool is_new_hyp = true;
					for (auto& sog_mode : sog_modes)
					{
						if (sog_mode.first.contains(all_corrs[idx1]) &&
							sog_mode.first.contains(all_corrs[idx2]))
						{
							// Increment weight:
							sog_mode.second.log_w =
								std::log(std::exp(sog_mode.second.log_w) + 1.0);
							is_new_hyp = false;
							break;
						}
					}
					if (!is_new_hyp) continue;

					// Ok, it's a new hypothesis:
					tentativeSubSet.push_back(all_corrs[idx1]);
					tentativeSubSet.push_back(all_corrs[idx2]);

					// Maintain a list of already used landmarks IDs in both
					// maps to avoid repetitions:
					std::vector<bool> used_landmarks1(nLM1, false);
					std::vector<bool> used_landmarks2(nLM2, false);

					used_landmarks1[all_corrs[idx1].this_idx] = true;
					used_landmarks1[all_corrs[idx2].this_idx] = true;
					used_landmarks2[all_corrs[idx1].other_idx] = true;
					used_landmarks2[all_corrs[idx2].other_idx] = true;

					// Build the transformation for these tentative
					// correspondences:
					bool keep_incorporating = true;
					CPosePDFGaussian temptPose;
					do  // Incrementally incorporate inliers:
					{
						if (!mrpt::tfest::se2_l2(tentativeSubSet, temptPose))
							continue;  // Invalid matching...

						// The computed cov is "normalized", i.e. must be
						// multiplied by std^2_xy
						temptPose.cov *= square(options.ransac_SOG_sigma_m);

						// cout << "q: " << temptPose << endl;

						// Find the landmark in MAP2 with the best (maximum)
						// product-integral:
						//   (i^* , j^*) = arg max_(i,j) \int p_i()p_j()
						//----------------------------------------------------------------------
						const double ccos = cos(temptPose.mean.phi());
						const double ssin = sin(temptPose.mean.phi());

						CMatrixDouble22 Hc;  // Jacobian wrt point_j
						Hc.get_unsafe(1, 1) = ccos;
						Hc.get_unsafe(0, 0) = ccos;
						Hc.get_unsafe(1, 0) = ssin;
						Hc.get_unsafe(0, 1) = -ssin;

						CMatrixFixedNumeric<double, 2, 3>
							Hq;  // Jacobian wrt transformation q
						Hq(0, 0) = 1;
						Hq(1, 1) = 1;

						TPoint2D p2_j_local;
						vector<float> matches_dist;
						std::vector<size_t> matches_idx;

						CPoint2DPDFGaussian pdf_M2_j;
						CPoint2DPDFGaussian pdf_M1_i;

// Use integral-of-product vs. mahalanobis distances to match:
#define GRIDMAP_USE_PROD_INTEGRAL

#ifdef GRIDMAP_USE_PROD_INTEGRAL
						double best_pair_value = 0;
#else
						double best_pair_value =
							std::numeric_limits<double>::max();
#endif
						double best_pair_d2 =
							std::numeric_limits<double>::max();
						pair<size_t, size_t> best_pair_ij;

						//#define SHOW_CORRS

#ifdef SHOW_CORRS
						CDisplayWindowPlots win("Matches");
#endif
						for (size_t j = 0; j < nLM2; j++)
						{
							if (used_landmarks2[j]) continue;

							lm2_pnts.getPoint(
								j, p2_j_local);  // In local coords.
							pdf_M2_j.mean = mrpt::poses::CPoint2D(
								temptPose.mean +
								p2_j_local);  // In (tentative) global coords:
							pdf_M2_j.cov.get_unsafe(0, 0) =
								pdf_M2_j.cov.get_unsafe(1, 1) =
									square(options.ransac_SOG_sigma_m);

#ifdef SHOW_CORRS
							win.plotEllipse(
								pdf_M2_j.mean.x(), pdf_M2_j.mean.y(),
								pdf_M2_j.cov, 2, "b-",
								format("M2_%u", (unsigned)j), true);
#endif

							static const unsigned int N_KDTREE_SEARCHED = 3;

							// Look for a few close features which may be
							// potential matches:
							lm1_pnts.kdTreeNClosestPoint2DIdx(
								pdf_M2_j.mean.x(), pdf_M2_j.mean.y(),
								N_KDTREE_SEARCHED, matches_idx, matches_dist);

							// And for each one, compute the product-integral:
							for (unsigned long u : matches_idx)
							{
								if (used_landmarks1[u]) continue;

								// Jacobian wrt transformation q
								Hq.get_unsafe(0, 2) =
									-p2_j_local.x * ssin - p2_j_local.y * ccos;
								Hq.get_unsafe(1, 2) =
									p2_j_local.x * ccos - p2_j_local.y * ssin;

								// COV_j = Hq \Sigma_q Hq^t + Hc Cj Hc^t
								Hc.multiply_HCHt(COV_pnt, pdf_M1_i.cov);
								Hq.multiply_HCHt(
									temptPose.cov, pdf_M1_i.cov, true);

								float px, py;
								lm1_pnts.getPoint(u, px, py);
								pdf_M1_i.mean.x(px);
								pdf_M1_i.mean.y(py);

#ifdef SHOW_CORRS
								win.plotEllipse(
									pdf_M1_i.mean.x(), pdf_M1_i.mean.y(),
									pdf_M1_i.cov, 2, "r-",
									format("M1_%u", (unsigned)matches_idx[u]),
									true);
#endif

// And now compute the product integral:
#ifdef GRIDMAP_USE_PROD_INTEGRAL
								const double prod_ij =
									pdf_M1_i.productIntegralWith(pdf_M2_j);
								// const double prod_ij_d2 = square(
								// pdf_M1_i.mahalanobisDistanceTo(pdf_M2_j) );

								if (prod_ij > best_pair_value)
#else
								const double prod_ij =
									pdf_M1_i.mean.distanceTo(pdf_M2_j.mean);
								if (prod_ij < best_pair_value)
#endif
								{
									// const double prodint_ij =
									// pdf_M1_i.productIntegralWith2D(pdf_M2_j);

									best_pair_value = prod_ij;
									best_pair_ij.first = u;
									best_pair_ij.second = j;

									best_pair_d2 =
										square(pdf_M1_i.mahalanobisDistanceTo(
											pdf_M2_j));

									// cout << "P1: " << pdf_M1_i.mean << " C= "
									// << pdf_M1_i.cov.inMatlabFormat() << endl;
									// cout << "P2: " << pdf_M2_j.mean << " C= "
									// << pdf_M2_j.cov.inMatlabFormat() << endl;
									// cout << "  -> " << format("%e",prod_ij)
									// << " d2: " << best_pair_d2 << endl <<
									// endl;
								}
							}  // end for u (closest matches of LM2 in MAP 1)

#ifdef SHOW_CORRS
							win.axis_fit(true);
							win.waitForKey();
							win.clear();
#endif

						}  // end for each LM2

						// Stop when the best choice has a bad mahal. dist.
						keep_incorporating = false;

						// For the best (i,j), gate by the mahalanobis distance:
						if (best_pair_d2 < chi2_thres_dim2)
						{
							// AND, also, check if the descriptors have some
							// resemblance!!
							// ----------------------------------------------------------------
							// double feat_dist =
							// lm1->landmarks.get(best_pair_ij.first)->features[0]->descriptorDistanceTo(*lm1->landmarks.get(best_pair_ij.second)->features[0]);
							// if (feat_dist< options.threshold_max)
							{
								float p1_i_localx, p1_i_localy;
								float p2_j_localx, p2_j_localy;
								lm1_pnts.getPoint(
									best_pair_ij.first, p1_i_localx,
									p1_i_localy);
								lm2_pnts.getPoint(
									best_pair_ij.second, p2_j_localx,
									p2_j_localy);  // In local coords.

								used_landmarks1[best_pair_ij.first] = true;
								used_landmarks2[best_pair_ij.second] = true;

								tentativeSubSet.push_back(
									mrpt::tfest::TMatchingPair(
										best_pair_ij.first, best_pair_ij.second,
										p1_i_localx, p1_i_localy, 0,  // MAP1
										p2_j_localx, p2_j_localy, 0  // MAP2
										));

								keep_incorporating = true;
							}
						}

					} while (keep_incorporating);

					// Consider this pairing?
					const size_t ninliers = tentativeSubSet.size();
					if (ninliers > minInliersTOaccept)
					{
						CPosePDFSOG::TGaussianMode newGauss;
						newGauss.log_w = 0;  // log(1);  //
						// std::log(static_cast<double>(nCoincidences));
						newGauss.mean = temptPose.mean;
						newGauss.cov = temptPose.cov;

						sog_modes[tentativeSubSet] = newGauss;

						// cout << "ITER: " << iter << " #inliers: " << ninliers
						// << " q: " << temptPose.mean << endl;
					}

					// Keep the largest consensus & dynamic # of steps:
					if (largestConsensusCorrs.size() < ninliers)
					{
						largestConsensusCorrs = tentativeSubSet;

						// Update estimate of N, the number of trials to ensure
						// we pick,
						// with probability p, a data set with no outliers.
						const double fracinliers =
							ninliers /
							static_cast<double>(std::min(nLM1, nLM2));
						double pNoOutliers =
							1 - pow(fracinliers,
									static_cast<double>(
										2.0));  // minimumSizeSamplesToFit

						pNoOutliers = std::max(
							std::numeric_limits<double>::epsilon(),
							pNoOutliers);  // Avoid division by -Inf
						pNoOutliers = std::min(
							1.0 - std::numeric_limits<double>::epsilon(),
							pNoOutliers);  // Avoid division by 0.
						// Estimated number of iterations needed to pick, with prob. p, an outlier-free pair:
						ransac_nSimulations =
							log(1 - probability_find_good_model) /
							log(pNoOutliers);

						if (ransac_nSimulations < ransac_min_nSimulations)
							ransac_nSimulations = ransac_min_nSimulations;

						// if (verbose)
						//	cout << "[Align] Iter #" << iter << " Est. #iters: "
						//<< ransac_nSimulations << "  pNoOutliers=" <<
						// pNoOutliers << " #inliers: " << ninliers << endl;
					}

				}  // end of RANSAC loop

				// Move SOG modes into pdf_SOG:
				pdf_SOG->clear();
				for (auto s = sog_modes.begin(); s != sog_modes.end(); ++s)
				{
					cout << "SOG mode: " << s->second.mean
						 << " inliers: " << s->first.size() << endl;
					pdf_SOG->push_back(s->second);
				}

				// Normalize:
				if (pdf_SOG->size() > 0) pdf_SOG->normalizeWeights();

				// Simplify the SOG by merging close modes:
				// -------------------------------------------------
				size_t nB = pdf_SOG->size();
				outInfo.sog1 = pdf_SOG;

				CTicTac merge_clock;
				pdf_SOG->mergeModes(options.maxKLd_for_merge, false);
				const double merge_clock_tim = merge_clock.Tac();

				outInfo.sog2 = pdf_SOG;
				size_t nA = pdf_SOG->size();
				MRPT_LOG_INFO(mrpt::format(
					"[CGridMapAligner] amModifiedRANSAC: %u SOG modes "
					"merged to %u in %.03fsec\n",
					(unsigned int)nB, (unsigned int)nA, merge_clock_tim));

			}  // end of amModifiedRANSAC

			// Save best corrs:
			if (options.debug_save_map_pairs)
			{
				static unsigned int NN = 0;
				static const COccupancyGridMap2D* lastM1 = nullptr;
				if (lastM1 != m1)
				{
					lastM1 = m1;
					NN = 0;
				}
				printf(
					"   Largest consensus: %u\n",
					static_cast<unsigned>(largestConsensusCorrs.size()));
				CEnhancedMetaFile::LINUX_IMG_WIDTH(
					m1->getSizeX() + m2->getSizeX() + 50);
				CEnhancedMetaFile::LINUX_IMG_HEIGHT(
					max(m1->getSizeY(), m2->getSizeY()) + 50);

				for (auto s = sog_modes.begin(); s != sog_modes.end(); ++s)
				{
					COccupancyGridMap2D::saveAsEMFTwoMapsWithCorrespondences(
						format("__debug_corrsGrid_%05u.emf", NN), m1, m2,
						s->first);
					COccupancyGridMap2D::saveAsBitmapTwoMapsWithCorrespondences(
						format("__debug_corrsGrid_%05u.png", NN), m1, m2,
						s->first);
					++NN;
				}
			}

			// --------------------------------------------------------------------
			// At this point:
			//   - "pdf_SOG": has the resulting PDF with the SOG (from whatever
			//   method)
			//   - "largestConsensusCorrs": The 'best' set of correspondences
			//
			//  Now: If we had a multi-metric map, use the points map to improve
			//        the estimation with ICP.
			// --------------------------------------------------------------------
			if (multimap1 && multimap2 && !multimap1->m_pointsMaps.empty() &&
				!multimap2->m_pointsMaps.empty() &&
				multimap1->m_pointsMaps[0] && multimap2->m_pointsMaps[0])
			{
				CSimplePointsMap::Ptr pnts1 = multimap1->m_pointsMaps[0];
				CSimplePointsMap::Ptr pnts2 = multimap2->m_pointsMaps[0];

				CICP icp;
				CICP::TReturnInfo icpInfo;

				icp.options.maxIterations = 20;
				icp.options.smallestThresholdDist = 0.05f;
				icp.options.thresholdDist = 0.75f;

				// cout << "Points: " << pnts1->size() << " " << pnts2->size()
				// << endl;

				// Invoke ICP once for each mode in the SOG:
				size_t cnt = 0;
				outInfo.icp_goodness_all_sog_modes.clear();
				for (auto i = pdf_SOG->begin(); i != pdf_SOG->end(); ++cnt)
				{
					CPosePDF::Ptr icp_est = icp.Align(
						pnts1.get(), pnts2.get(), (i)->mean, nullptr, &icpInfo);

					//(i)->cov(0,0) += square( 0.05 );
					//(i)->cov(1,1) += square( 0.05 );
					//(i)->cov(2,2) += square( DEG2RAD(0.05) );

					CPosePDFGaussian i_gauss(i->mean, i->cov);
					CPosePDFGaussian icp_gauss(icp_est->getMeanVal(), i->cov);

					const double icp_maha_dist =
						i_gauss.mahalanobisDistanceTo(icp_gauss);

					cout << "ICP " << cnt << " log-w: " << i->log_w
						 << " Goodness: " << 100 * icpInfo.goodness
						 << "  D_M= " << icp_maha_dist << endl;
					cout << "  final pos: " << icp_est->getMeanVal() << endl;
					cout << "    org pos: " << i->mean << endl;

					outInfo.icp_goodness_all_sog_modes.push_back(
						icpInfo.goodness);

					// Discard?
					if (icpInfo.goodness > options.min_ICP_goodness &&
						icp_maha_dist < options.max_ICP_mahadist)
					{
						icp_est->getMean((i)->mean);
						++i;
					}
					else
					{
						// Delete:
						i = pdf_SOG->erase(i);
					}

				}  // end for i

				// Merge:
				outInfo.sog3 = pdf_SOG;

				pdf_SOG->mergeModes(options.maxKLd_for_merge, false);
				MRPT_LOG_DEBUG_STREAM(
					"[CGridMapAligner] " << pdf_SOG->size()
										 << " SOG modes merged after ICP.");

			}  // end multimapX

		}  // end of, yes, we have enough correspondences

	}  // end of: yes, there are landmarks in the grid maps!

	// Copy the output info if requested:
	// -------------------------------------------------
	MRPT_TODO(
		"Refactor `info` so it is polymorphic and can use dynamic_cast<> here");
	if (info)
	{
		auto* info_ = static_cast<TReturnInfo*>(info);
		*info_ = outInfo;
	}

	if (runningTime)
	{
		*runningTime = tictac->Tac();
		delete tictac;
	}

	return pdf_SOG;

	MRPT_END
}
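Two of the numeric tests inside the amModifiedRANSAC branch above, written out explicitly. A randomly picked pair of correspondences is kept only if the distances d1 (between the two landmarks in MAP1) and d2 (between their counterparts in MAP2) pass a chi-squared gate with one degree of freedom,

$$\frac{\left(d_1^{2}-d_2^{2}\right)^{2}}{8\,\sigma^{2}\left(d_1^{2}+d_2^{2}\right)}\;\le\;\chi^{2}_{1}(q),$$

where $\sigma$ is options.ransac_SOG_sigma_m and $q$ is options.ransac_chi2_quantile. The loop length is then adapted from the largest consensus found so far with the standard RANSAC bound for a 2-sample model,

$$N \;=\; \frac{\log(1-p)}{\log\!\left(1-w^{2}\right)},$$

with $p = 0.9999$ the desired probability of drawing at least one outlier-free pair and $w$ the current inlier fraction (relative to the smaller landmark map); N is clamped below by 2(nLM1 + nLM2).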
Example #24
0
void display()
{
	CDisplayWindow3D window("Ray trace demo", 640, 480);
	window.setPos(10, 10);
	std::this_thread::sleep_for(20ms);
	COpenGLScene::Ptr scene1 = mrpt::make_aligned_shared<COpenGLScene>();
	// COpenGLScene::Ptr &scene1=window.get3DSceneAndLock();
	opengl::CGridPlaneXY::Ptr plane1 =
		mrpt::make_aligned_shared<CGridPlaneXY>(-20, 20, -20, 20, 0, 1);
	plane1->setColor(GRID_R, GRID_G, GRID_B);
	scene1->insert(plane1);
	scene1->insert(
		mrpt::make_aligned_shared<CAxis>(-5, -5, -5, 5, 5, 5, 2.5, 3, true));
	CSetOfObjects::Ptr world = mrpt::make_aligned_shared<CSetOfObjects>();
	generateObjects(world);
	scene1->insert(world);
	CPose3D basePose = randomPose();
	CAngularObservationMesh::Ptr aom =
		mrpt::make_aligned_shared<CAngularObservationMesh>();
	CTicTac t;
	t.Tic();
	CAngularObservationMesh::trace2DSetOfRays(
		scene1, basePose, aom,
		CAngularObservationMesh::TDoubleRange::CreateFromAmount(
			-M_PI / 2, 0, HOW_MANY_PITCHS),
		CAngularObservationMesh::TDoubleRange::CreateFromAperture(
			M_PI, HOW_MANY_YAWS));
	cout << "Elapsed time: " << t.Tac() << " seconds.\n";
	aom->setColor(0, 1, 0);
	aom->setWireframe(true);
	// Comment to stop showing traced rays and scan range guidelines.
	CSetOfLines::Ptr traced = mrpt::make_aligned_shared<CSetOfLines>();
	CSetOfLines::Ptr guides = mrpt::make_aligned_shared<CSetOfLines>();
	aom->getTracedRays(traced);
	traced->setLineWidth(1.5);
	traced->setColor(1, 0, 0);
	guideLines(basePose, guides, 10);
	scene1->insert(traced);
	scene1->insert(guides);

	// Uncomment to also show traced rays that got lost.
	/*
	CSetOfLines::Ptr untraced=mrpt::make_aligned_shared<CSetOfLines>();
	aom->getUntracedRays(untraced,20);
	untraced->setLineWidth(1);
	untraced->setColor(1,1,1,0.5);
	scene1->insert(untraced);
	*/
	CSphere::Ptr point = mrpt::make_aligned_shared<CSphere>(0.2);
	point->setColor(0, 1, 0);
	point->setPose(basePose);
	scene1->insert(point);
	CDisplayWindow3D window2("Observed mesh", 640, 480);
	window2.setPos(660, 10);
	std::this_thread::sleep_for(20ms);
	window.get3DSceneAndLock() = scene1;
	window.unlockAccess3DScene();
	window.setCameraElevationDeg(25.0f);
	COpenGLScene::Ptr& scene2 = window2.get3DSceneAndLock();
	scene2->insert(aom);
	opengl::CGridPlaneXY::Ptr plane2 =
		mrpt::make_aligned_shared<CGridPlaneXY>(-20, 20, -20, 20, 0, 1);
	plane2->setColor(GRID_R, GRID_G, GRID_B);
	scene2->insert(plane2);
	scene2->insert(
		mrpt::make_aligned_shared<CAxis>(-5, -5, -5, 5, 5, 5, 2.5, 3, true));
	window2.unlockAccess3DScene();
	window2.setCameraElevationDeg(25.0f);

	window.waitForKey();
}
Example #25
0
/*-------------------------------------------------------------
						connectAndEnableMotors
-------------------------------------------------------------*/
void CActivMediaRobotBase::connectAndEnableMotors()
{
#if MRPT_HAS_ARIA
	// Establish the connection with the Pioneer robot
	if (!static_cast<ArSimpleConnector*>(m_simpleConnector)->connectRobot( THE_ROBOT ))
	{
		THROW_EXCEPTION_CUSTOM_MSG1("[CActivMediaRobotBase] Couldn't connect to robot thru %s", m_com_port.c_str() )
	}
	else
	{
		// Enable processing thread:
		THE_ROBOT->lock();
		THE_ROBOT->runAsync(true);
		THE_ROBOT->unlock();

		mrpt::system::sleep(500);

		CTicTac	tictac;
		tictac.Tic();

		if (!THE_ROBOT->areMotorsEnabled())  // Are the motors already enabled?
		{
			THE_ROBOT->lock();
			THE_ROBOT->enableMotors();
			THE_ROBOT->unlock();
			mrpt::system::sleep(500);

			bool	success = false;

			while (tictac.Tac()<4000)
			{
			    THE_ROBOT->lock();
			    if (!THE_ROBOT->isRunning())
				{
					THROW_EXCEPTION("ARIA robot is not running");
				}
				if (THE_ROBOT->areMotorsEnabled())
				{
					THE_ROBOT->unlock();
					success = true;
					break;
				}
				THE_ROBOT->unlock();
				mrpt::system::sleep(100);
			}

			if (!success)
			{
				disconnectAndDisableMotors();
				THROW_EXCEPTION("Couldn't enable robot motors");
			}

			// Start continuous stream of packets:
			THE_ROBOT->lock();
			THE_ROBOT->requestEncoderPackets();

			if (m_enableSonars)
					THE_ROBOT->enableSonar();
			else	THE_ROBOT->disableSonar();

			THE_ROBOT->unlock();

			m_firstIncreOdometry = true;
		}
	}
#else
	THROW_EXCEPTION("MRPT has been compiled with 'MRPT_BUILD_ARIA'=OFF, so this class cannot be used.");
#endif
}
Example #26
0
File: test.cpp Project: 3660628/mrpt
// ------------------------------------------------------
//				TestImageConversion
// ------------------------------------------------------
void TestImageConversion()
{
	// BMP -> JPEG conversion tester:
	// --------------------------------
	CImage		img,img2;
	CTicTac			tictac;
	CTimeLogger     timlog;

	tictac.Tic();
	if (!img.loadFromFile(myDataDir+string("frame_color.jpg")))
	{
		cerr << "Cannot load " << myDataDir+string("frame_color.jpg") << endl;
		return;
	}
	printf("Image loaded in %.03fms\n", 1000*tictac.Tac() );

	if (false)   // A very simple test:
	{
		CDisplayWindow		win1("JPEG file, color");
		win1.setPos(10,10);

		win1.showImage( img );

		cout << "Push a key in the console or in the window to continue...";
		win1.waitForKey();
		cout << "Done" << endl;

		timlog.enter("grayscale1");
		img = img.grayscale();
		timlog.leave("grayscale1");

		CDisplayWindow		win2("JPEG file, gray");
		win2.showImage( img );
		win1.setPos(300,10);

		cout << "Push a key in the console or in the window to continue...";
		win2.waitForKey();
		cout << "Done" << endl;

		mrpt::system::pause();
		return;
	}

	CDisplayWindow		win1("win1"),win2("win2"),win3("win3"),win4("win4");

	CImage			imgSmall( img );
	CImage			imgGray;

	for (int i=0;i<50;i++)
	{
		timlog.enter("grayscale2");
		imgSmall.grayscale(imgGray);
		timlog.leave("grayscale2");
	}

	CImage			imgSmall2( imgGray.scaleHalfSmooth() );
	CImage			imgSmallRGB( img.scaleHalf() ); //Smooth() );

	// Test some draw capabilities:
	// ---------------------------------
	imgSmall.rectangle( 85,35, 170,170,TColor(255,0,0),10);

	imgSmall.line( 550,75, 650,25,TColor(0,0,255));
	imgSmall.line( -10,-20, 20,30,TColor(0,0,255));

	CMatrix 	COV(2,2);
	COV(0,0) = 100;
	COV(1,1) = 50;
	COV(0,1) = COV(1,0) = -30;
	imgSmall.ellipseGaussian( &COV, 600.0f,50.0f, 2, TColor(255,255,0), 4);
	imgGray.ellipseGaussian( &COV, 100.0f,100.0f, 2, TColor(0,0,255), 4);

	imgSmall.drawImage( 400,500,imgGray );

	// Show the windows now:
	// ------------------------------------------------------
	win1.showImage( imgSmall ); win1.setPos(0,0);
	win2.showImage( imgSmall2 ); win2.setPos(810,0);
	win3.showImage( imgGray ); win3.setPos(810,300);
	win4.showImage( imgSmallRGB ); win4.setPos(300,400);



	cout << "Press any key on 'win4' to exit" << endl;
	win4.waitForKey();

	tictac.Tic();
	img.saveToFile("frame_out.jpg");
	printf("jpeg file saved in %.03fms\n", 1000.0f*tictac.Tac() );

	imgSmall2.saveToFile("frame_out_small.png");

	return;
}
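On the ellipseGaussian() calls above: they overlay the confidence ellipse of a 2-D Gaussian with the given covariance $\Sigma$ (COV here), centred at the passed (x, y). For a confidence factor of $n$ standard deviations (the 2 in the calls above), the plotted curve is the level set

$$\left\{\,p\in\mathbb{R}^{2}\;:\;(p-\mu)^{\top}\Sigma^{-1}(p-\mu)=n^{2}\,\right\},$$

an ellipse whose axes follow the eigenvectors of $\Sigma$ with half-lengths $n\sqrt{\lambda_i}$ for the corresponding eigenvalues $\lambda_i$.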
Example #27
0
File: test.cpp Project: GYengera/mrpt
// ------------------------------------------------------
//		TestMatchFeatures
// ------------------------------------------------------
void TestMatchFeatures( bool showMatches )
{

	CFeatureExtraction	fExt;
	CFeatureList		featsHarris_L, featsHarris_R, featsSIFT_L, featsSIFT_R, featsSURF_L, featsSURF_R, featsFAST_L, featsFAST_R;
	CMatchedFeatureList	mHarris, mSIFT, mSURF, mHarris_SAD, mFAST_CC, mFAST_SAD;
	CImage				imL, imR;

	string imgL = myDataDir + string("imL_p01.jpg");		// Left image
	string imgR = myDataDir + string("imR_p01.jpg");		// Right image

	// Load and check images
	if (!imL.loadFromFile( imgL ))
	{
		cerr << "Cannot load " << imgL  << endl;
		return;
	}
	cout << "Loaded LEFT image: " << endl << imgL << endl;

	if (!imR.loadFromFile( imgR ))
	{
		cerr << "Cannot load " << imgR  << endl;
		return;
	}
	cout << "Loaded RIGHT image: " << endl << imgR << endl;

	cout << "***************************************************" << endl;
	cout << "***************************************************" << endl;

	// Extract features:
	// HARRIS
	cout << "Detecting HARRIS features in LEFT image" << endl;
	fExt.options.featsType = featHarris;
	fExt.detectFeatures( imL, featsHarris_L, 250 );
	cout << "Detected " << featsHarris_L.size() << endl;

	cout << "Detecting HARRIS features in RIGHT image" << endl;
	fExt.detectFeatures( imR, featsHarris_R, 250 );
	cout << "Detected " << featsHarris_R.size() << endl;
	cout << "***************************************************" << endl;

	// SIFT
	cout << "Detecting SIFT features in LEFT image" << endl;
	fExt.options.featsType = featSIFT;
	//fExt.options.SIFTOptions.implementation = CFeatureExtraction::Hess;
	fExt.options.SIFTOptions.implementation = CFeatureExtraction::OpenCV;
	fExt.detectFeatures( imL, featsSIFT_L );
	cout << "Detected " << featsSIFT_L.size() << endl;

	cout << "Detecting SIFT features in RIGHT image" << endl;
	fExt.options.featsType = featSIFT;
	//fExt.options.SIFTOptions.implementation = CFeatureExtraction::Hess;
	fExt.options.SIFTOptions.implementation = CFeatureExtraction::OpenCV;
	fExt.detectFeatures( imR, featsSIFT_R );
	cout << "Detected " << featsSIFT_R.size() << endl;
	cout << "***************************************************" << endl;

	// SURF
	cout << "Detecting SURF features in LEFT image" << endl;
	fExt.options.featsType = featSURF;
	fExt.detectFeatures( imL, featsSURF_L );
	cout << "Detected " << featsSURF_L.size() << endl;

	cout << "Detecting SURF features in RIGHT image" << endl;
	fExt.detectFeatures( imR, featsSURF_R );
	cout << "Detected " << featsSURF_R.size() << endl;
	cout << "***************************************************" << endl;

	// FAST
	cout << "Detecting FAST features in LEFT image" << endl;
	fExt.options.featsType = featFAST;
	fExt.detectFeatures( imL, featsFAST_L, 0, 250 );
	cout << "Detected " << featsFAST_L.size() << endl;

	cout << "Detecting FAST features in RIGHT image" << endl;
	fExt.detectFeatures( imR, featsFAST_R, 0, 250 );
	cout << "Detected " << featsFAST_R.size() << endl;
	cout << "***************************************************" << endl;
	cout << "***************************************************" << endl;

	// Match features:
	size_t nMatches;
	TMatchingOptions opt;

	// HARRIS
	CTicTac tictac;
	double T = 0.0;
	cout << "Matching HARRIS features" << endl;
	tictac.Tic();
	nMatches = matchFeatures( featsHarris_L, featsHarris_R, mHarris );
	T = tictac.Tac();
	cout << "[NCC] Matches found: " << mHarris.size() << " in " << T*1000.0f << " ms " << endl;

	opt.matching_method = TMatchingOptions::mmSAD;
	tictac.Tic();
	nMatches = matchFeatures( featsHarris_L, featsHarris_R, mHarris_SAD, opt );
	T = tictac.Tac();
	cout << "[SAD] Matches found: " << mHarris_SAD.size() << " in " << T*1000.0f << " ms " << endl;
	cout << "***************************************************" << endl;

	// SIFT
	cout << "Matching SIFT features by DESCRIPTOR" << endl;
	opt.matching_method = TMatchingOptions::mmDescriptorSIFT;
	tictac.Tic();
	nMatches = matchFeatures( featsSIFT_L, featsSIFT_R, mSIFT, opt );
	T = tictac.Tac();
	cout << "Matches found: " << mSIFT.size() << " in " << T*1000.0f << " ms " << endl;
	cout << "***************************************************" << endl;

	// SURF
	cout << "Matching SURF features by DESCRIPTOR" << endl;
	opt.matching_method = TMatchingOptions::mmDescriptorSURF;
	tictac.Tic();
	nMatches = matchFeatures( featsSURF_L, featsSURF_R, mSURF, opt );
	T = tictac.Tac();
	cout << "Matches found: " << mSURF.size() << " in " << T*1000.0f << " ms " << endl;
	cout << "***************************************************" << endl;

	// FAST
	cout << "Matching FAST features" << endl;
	tictac.Tic();
	nMatches = matchFeatures( featsFAST_L, featsFAST_R, mFAST_CC );
	T = tictac.Tac();
	cout << "[NCC] Matches found: " << mFAST_CC.size() << " in " << T*1000.0f << " ms " << endl;

	opt.matching_method = TMatchingOptions::mmSAD;
	tictac.Tic();
	nMatches = matchFeatures( featsFAST_L, featsFAST_R, mFAST_SAD, opt );
	T = tictac.Tac();
	cout << "[SAD] Matches found: " << mFAST_SAD.size() << " in " << T*1000.0f << " ms " << endl;
	cout << "***************************************************" << endl;


    if( showMatches )
    {
        CDisplayWindow winHarrisSAD, winHarrisNCC, winFASTSAD, winFASTNCC, winSIFT, winSURF;

        winHarrisSAD.setWindowTitle("Matches with Harris + SAD");
        winHarrisNCC.setWindowTitle("Matches with Harris + NCC");
        winFASTSAD.setWindowTitle("Matches with FAST + SAD");
        winFASTNCC.setWindowTitle("Matches with FAST + NCC");
        winSIFT.setWindowTitle("Matches with SIFT");
        winSURF.setWindowTitle("Matches with SURF");

        winHarrisNCC.showImagesAndMatchedPoints( imL, imR, mHarris, TColor(0,0,255) );
        winHarrisSAD.showImagesAndMatchedPoints( imL, imR, mHarris_SAD, TColor(0,0,255) );
        winSIFT.showImagesAndMatchedPoints( imL, imR, mSIFT, TColor(0,255,0) );
        winSURF.showImagesAndMatchedPoints( imL, imR, mSURF, TColor(0,255,0) );
        winFASTSAD.showImagesAndMatchedPoints( imL, imR, mFAST_SAD, TColor(0,255,0) );
        winFASTNCC.showImagesAndMatchedPoints( imL, imR, mFAST_CC, TColor(0,255,0) );

        mrpt::system::pause();
    }

} // end TestMatchFeatures
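The matching half of the test boils down to one call per method: fill a TMatchingOptions, pick a matching_method, and hand two CFeatureLists to matchFeatures(). A condensed sketch of the FAST + SAD path shown above follows; it assumes the MRPT 1.x API used in these tests, the umbrella header is an assumption, and the image paths are placeholders.

#include <mrpt/vision.h>   // umbrella header assumed; CFeatureExtraction, matchFeatures, etc.
#include <iostream>
#include <string>

void matchWithSAD(const std::string& leftPath, const std::string& rightPath)
{
	mrpt::utils::CImage imL, imR;
	if (!imL.loadFromFile(leftPath) || !imR.loadFromFile(rightPath))
		return;

	mrpt::vision::CFeatureExtraction fExt;
	fExt.options.featsType = mrpt::vision::featFAST;

	mrpt::vision::CFeatureList featsL, featsR;
	fExt.detectFeatures(imL, featsL, 0 /*first ID*/, 250 /*max feats*/);
	fExt.detectFeatures(imR, featsR, 0, 250);

	mrpt::vision::TMatchingOptions opt;
	opt.matching_method = mrpt::vision::TMatchingOptions::mmSAD;  // patch-difference matching

	mrpt::vision::CMatchedFeatureList matches;
	const size_t n = mrpt::vision::matchFeatures(featsL, featsR, matches, opt);
	std::cout << "SAD matches: " << n << std::endl;
}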
Example #28
File: test.cpp  Project: KongWeibin/mrpt
void TestCapture_FlyCapture2_stereo()
{
	cout << " FlyCapture2 version: " << CImageGrabber_FlyCapture2::getFC2version() << std::endl;

	// Left camera:
	// ------------------------------------------
	CImageGrabber_FlyCapture2   capture_left;
	TCaptureOptions_FlyCapture2 cam_options_left;

	cam_options_left.framerate="FRAMERATE_30";
	cam_options_left.videomode="VIDEOMODE_1280x960RGB";

	cam_options_left.open_by_guid = true;
	cam_options_left.camera_guid[0] = 0x63A8D3CE;
	cam_options_left.camera_guid[1] = 0xB49580D6;
	cam_options_left.camera_guid[2] = 0x004AED1C;
	cam_options_left.camera_guid[3] = 0xDDE4EF14;

	cam_options_left.strobe_enabled = true;
	cam_options_left.strobe_source = 1;  // GPIO pin #
	cam_options_left.strobe_duration = 1.0; // ms

	capture_left.open(cam_options_left,false /*only open, don't start grabbing*/);

	// Right camera:
	// ------------------------------------------
	CImageGrabber_FlyCapture2   capture_right;
	TCaptureOptions_FlyCapture2 cam_options_right;

	cam_options_right.framerate = cam_options_left.framerate;
	cam_options_right.videomode = cam_options_left.videomode;

	cam_options_right.open_by_guid = true;
	cam_options_right.camera_guid[0] = 0xB9862FD2;
	cam_options_right.camera_guid[1] = 0x7AE0E03A;
	cam_options_right.camera_guid[2] = 0xA6BC0321;
	cam_options_right.camera_guid[3] = 0x16654DC9;

	cam_options_right.trigger_enabled = true;
	cam_options_right.trigger_source = 0;  // GPIO pin #

	capture_right.open(cam_options_right,false /*only open, don't start grabbing*/);

	// Open both cameras simultaneously:
#if 0
	const CImageGrabber_FlyCapture2 *cameras[2] = { &capture_left, &capture_right };
	CImageGrabber_FlyCapture2::startSyncCapture(2 /*numCameras*/, cameras);
#else
	capture_right.startCapture(); // Will not start grabbing until a strobe is received from the "master" camera:
	capture_left.startCapture();
#endif

	CTicTac tictac;
	cout << "Press any key to stop capture to 'capture.rawlog'..." << endl;

	CFileGZOutputStream fil("./capture.rawlog");

	CDisplayWindow winL("Left"), winR("Right");

	int cnt = 0;

	CObservationImagePtr obsL= CObservationImage::Create();  // Reused each iteration; the smart pointer releases the memory automatically.
	obsL->sensorLabel="LEFT";

	CObservationImagePtr obsR= CObservationImage::Create();  // Reused each iteration; the smart pointer releases the memory automatically.
	obsR->sensorLabel="RIGHT";

	while (!mrpt::system::os::kbhit())
	{
		if ( (cnt++ % 20) == 0 )
		{
			if (cnt>0)
			{
				double t = tictac.Tac();
				double FPS = 20 / t;
				printf("\n %f FPS\n", FPS);
			}
			tictac.Tic();
		}

		const bool ok1 = capture_left.getObservation( *obsL );
		const bool ok2 = capture_right.getObservation( *obsR );
		if (!ok1 || !ok2)
		{
			cerr << "Error retrieving images!" << endl;
			break;
		}

		cout << "."; cout.flush();
		if (winL.isOpen()) winL.showImage( obsL->image );
		if (winR.isOpen()) winR.showImage( obsR->image );

		fil << obsL << obsR;
	}

}
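Both this test and the dc1394 one below print a frame rate every N frames by timing the window with CTicTac. The same idiom, isolated into a tiny helper written with std::chrono, is sketched here; the struct and its names are hypothetical.

#include <chrono>
#include <cstdio>

// Prints the average FPS once every `windowSize` frames.
struct FpsCounter
{
	explicit FpsCounter(int N) : windowSize(N) {}

	void tick()   // call once per grabbed frame
	{
		if (++count % windowSize == 0)
		{
			const auto t1 = std::chrono::steady_clock::now();
			const double secs = std::chrono::duration<double>(t1 - t0).count();
			printf("%.1f FPS\n", windowSize / secs);
			t0 = t1;
		}
	}

	int windowSize;
	int count = 0;
	std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now();
};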
Example #29
File: test.cpp  Project: afrancocf/mrpt
void TestCapture_1394()
{
	TCaptureOptions_dc1394 	options;

	uint64_t	cameraGUID = 0;
	uint16_t	cameraUnit = 0;

	options.frame_width = 1024; //640;
	options.frame_height = 768; // 480;
	options.color_coding = COLOR_CODING_YUV422;

	// Other capture options:
	//options.shutter = 900;

	// For stereo Bumblebee tests/debugging (Use the Bumblebee class in mrpt::vision instead!)
//	options.mode7 = 3;
//	options.deinterlace_stereo = true;


	CImageGrabber_dc1394	capture( cameraGUID, cameraUnit, options, true /* Verbose */ );

	CTicTac		tictac;

	cout << "Press any key to stop capture to 'capture.rawlog'..." << endl;

#if DO_CAPTURE
	CFileGZOutputStream fil("./capture.rawlog");
#endif

	CDisplayWindow		win("Capturing...");

	int cnt = 0;

	while (!mrpt::system::os::kbhit())
	{
		if ( (cnt++ % 10) == 0 )
		{
			if (cnt>0)
			{
				double t = tictac.Tac();
				double FPS = 10 / t;
				printf("\n %f FPS\n", FPS);

				// Other capture options:
				//options.shutter = cnt + 1;
				//capture.changeCaptureOptions(options);
			}
			tictac.Tic();
		}

		CObservationImagePtr obs= CObservationImage::Create(); // Reused each iteration; the smart pointer releases the memory automatically.
		if (!capture.getObservation( *obs ))
		{
			cerr << "Error retrieving images!" << endl;
			break;
		}

#if DO_CAPTURE
		fil << obs;
#endif
		cout << "."; cout.flush();
		if (win.isOpen())
			win.showImage( obs->image );
	}

}
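Both capture tests stream every grabbed observation into a gzip-compressed rawlog simply with operator<< on a CFileGZOutputStream. A minimal sketch of that recording idiom is shown below; the header paths are assumptions for this MRPT version and the helper name is hypothetical.

#include <mrpt/utils/CFileGZOutputStream.h>   // header paths assumed for this MRPT version
#include <mrpt/slam/CObservationImage.h>

// Append one image observation to an (already open) rawlog stream.
void recordObservation(mrpt::utils::CFileGZOutputStream& fil,
                       const mrpt::slam::CObservationImagePtr& obs)
{
	// operator<< serializes the observation the smart pointer points to:
	fil << obs;
}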
/** The PF algorithm implementation for "optimal sampling for non-parametric observation models"
  */
void  CLSLAM_RBPF_2DLASER::prediction_and_update_pfAuxiliaryPFOptimal(
	CLocalMetricHypothesis	*LMH,
	const mrpt::slam::CActionCollection	* actions,
	const mrpt::slam::CSensoryFrame		* sf,
	const bayes::CParticleFilter::TParticleFilterOptions &PF_options )
{
	MRPT_START

		// Get the current robot pose estimation:
	TPoseID		currentPoseID = LMH->m_currentRobotPose;

	size_t				i,k,N,M = LMH->m_particles.size();

	// ----------------------------------------------------------------------
	//	  We can execute the optimal PF only when we have both an action and
	//	  a valid observation from which to compute the likelihood:
	//	  Accumulate odometry/actions until we have a valid observation, then
	//	  process them simultaneously.
	// ----------------------------------------------------------------------
//	static CActionRobotMovement2D	accumRobotMovement;
//	static bool						accumRobotMovementIsValid = false;
	bool  SFhasValidObservations = false;
	// A valid action?
	if (actions!=NULL)
	{
		CActionRobotMovement2DPtr act = actions->getBestMovementEstimation();	// Find a robot movement estimation:
		if (!act) THROW_EXCEPTION("Action list does not contain any CActionRobotMovement2D derived object!");

		if (!LMH->m_accumRobotMovementIsValid) // Reset accum.
		{
			act->poseChange->getMean( LMH->m_accumRobotMovement.rawOdometryIncrementReading );
			LMH->m_accumRobotMovement.motionModelConfiguration = act->motionModelConfiguration;
		}
		else
			LMH->m_accumRobotMovement.rawOdometryIncrementReading =
				LMH->m_accumRobotMovement.rawOdometryIncrementReading +
				act->poseChange->getMeanVal();

		LMH->m_accumRobotMovementIsValid = true;
	}

	if (sf!=NULL)
	{
		ASSERT_(LMH->m_particles.size()>0);
		SFhasValidObservations = (*LMH->m_particles.begin()).d->metricMaps.canComputeObservationsLikelihood( *sf );
	}

	// All the needed things?
	if (!LMH->m_accumRobotMovementIsValid || !SFhasValidObservations)
		return; // Nothing we can do here...

	// OK, we have all we need, let's start!

	// Take the accum. actions as input:
	CActionRobotMovement2D		theResultingRobotMov;

	// Enforce the minimum odometry uncertainty (MIN_ODOMETRY_STD_XY / _PHI options):
	keep_max(
		LMH->m_accumRobotMovement.motionModelConfiguration.gausianModel.minStdXY,
		LMH->m_parent->m_options.MIN_ODOMETRY_STD_XY);
	keep_max(
		LMH->m_accumRobotMovement.motionModelConfiguration.gausianModel.minStdPHI,
		LMH->m_parent->m_options.MIN_ODOMETRY_STD_PHI);

	theResultingRobotMov.computeFromOdometry( LMH->m_accumRobotMovement.rawOdometryIncrementReading, LMH->m_accumRobotMovement.motionModelConfiguration );

	const CActionRobotMovement2D		*robotMovement = &theResultingRobotMov;

	LMH->m_accumRobotMovementIsValid = false; // To reset odometry at next iteration!


	// ----------------------------------------------------------------------
	//		0) Common part:  Prepare m_particles "draw" and compute
	// ----------------------------------------------------------------------
	// Precompute a list of "random" samples from the movement model:
	LMH->m_movementDraws.clear();

	// Fast pseudorandom generator of poses...
	//m_movementDraws.resize( max(2000,(int)(PF_options.pfAuxFilterOptimal_MaximumSearchSamples * 5.6574) ) );
	LMH->m_movementDraws.resize( PF_options.pfAuxFilterOptimal_MaximumSearchSamples * M );
	size_t			size_movementDraws = LMH->m_movementDraws.size();
	LMH->m_movementDrawsIdx = (unsigned int)floor(randomGenerator.drawUniform(0.0f,((float)size_movementDraws)-0.01f));

	robotMovement->prepareFastDrawSingleSamples();
	for (size_t i=0;i<LMH->m_movementDraws.size();i++)
		robotMovement->fastDrawSingleSample( LMH->m_movementDraws[i] );

	LMH->m_pfAuxiliaryPFOptimal_estimatedProb.resize(M);
	LMH->m_maxLikelihood.clear();
	LMH->m_maxLikelihood.resize(M,0);
	LMH->m_movementDrawMaximumLikelihood.resize(M);

	// Prepare data for executing "fastDrawSample"
	CTicTac		tictac;
	tictac.Tic();
	LMH->prepareFastDrawSample(
		PF_options,
		particlesEvaluator_AuxPFOptimal,
		robotMovement,
		sf );
	printf("[prepareFastDrawSample] Done in %.06f ms\n",tictac.Tac()*1e3f);

#if 0
	printf("[prepareFastDrawSample] max      (log) = %10.06f\n",  math::maximum(LMH->m_pfAuxiliaryPFOptimal_estimatedProb) );
	printf("[prepareFastDrawSample] max-mean (log) = %10.06f\n", -math::mean(LMH->m_pfAuxiliaryPFOptimal_estimatedProb) + math::maximum(LMH->m_pfAuxiliaryPFOptimal_estimatedProb) );
	printf("[prepareFastDrawSample] max-min  (log) = %10.06f\n", -math::minimum(LMH->m_pfAuxiliaryPFOptimal_estimatedProb) + math::maximum(LMH->m_pfAuxiliaryPFOptimal_estimatedProb) );
#endif

	// Now we have the vector "m_fastDrawProbability" filled out with:
	//     w[i]·p(zt|z^{t-1},x^{[i],t-1},X)
	//  where X is the robot pose prior (as implemented in
	//  the aux. function "particlesEvaluator_AuxPFOptimal"),
	//  and also the "m_maxLikelihood" filled with the maximum lik. values.
	StdVector_CPose2D		newParticles;
	vector<double>			newParticlesWeight;
	vector<size_t>			newParticlesDerivedFromIdx;

	// We need the (approximate) maximum likelihood value for each
	//  previous particle [i]:
	//
	//     max{ p( z^t | data^[i], x_(t-1)^[i], u_(t) ) }
	//
	//CVectorDouble					maxLikelihood(M, -1 );

	float							MIN_ACCEPT_UNIF_DISTRIB = 0.00f;

	CPose2D							movementDraw,newPose,oldPose;
	double							acceptanceProb,newPoseLikelihood,ratioLikLik;
	unsigned int					statsTrialsCount = 0, statsTrialsSuccess = 0;
	std::vector<bool>				maxLikMovementDrawHasBeenUsed(M, false);
	unsigned int					statsWarningsAccProbAboveOne = 0;
	//double							maxMeanLik = math::maximum( LMH->m_pfAuxiliaryPFOptimal_estimatedProb ); // For normalization purposes only

	ASSERT_(!PF_options.adaptiveSampleSize);

	// ----------------------------------------------------------------------
	//						1) FIXED SAMPLE SIZE VERSION
	// ----------------------------------------------------------------------
	newParticles.resize(M);
	newParticlesWeight.resize(M);
	newParticlesDerivedFromIdx.resize(M);

	bool	doResample = LMH->ESS() < 0.5;

	for (i=0;i<M;i++)
	{
		// Generate a new particle:
		//   (a) Draw a "t-1" m_particles' index:
		// ----------------------------------------------------------------
		if (doResample)
				k = LMH->fastDrawSample(PF_options);		// Based on weights of last step only!
		else	k = i;

		oldPose = CPose2D( *LMH->getCurrentPose(k) );

        //   (b) Rejection-sampling: Draw a new robot pose from x[k],
		//       and accept it with probability p(zk|x) / maxLikelihood:
		// ----------------------------------------------------------------
		if ( LMH->m_SFs.empty() )
		{
			// The first robot pose in the SLAM execution: All m_particles start
			// at the same point (this is the lowest bound of subsequent uncertainty):
			movementDraw = CPose2D(0,0,0);
			newPose = oldPose; // + movementDraw;
		}
		else
		{
			// Rejection-sampling:
			do
			{
				// Draw new robot pose:
				if (!maxLikMovementDrawHasBeenUsed[k])
				{
					// First, take advantage of the maximum-likelihood draw, but only once:
					maxLikMovementDrawHasBeenUsed[k] = true;
					movementDraw = LMH->m_movementDrawMaximumLikelihood[k];
#if 0
					cout << "Drawn pose (max. lik): " << movementDraw << endl;
#endif
				}
				else
				{
					// Draw new robot pose:
					//robotMovement->drawSingleSample( movementDraw );
					//robotMovement->fastDrawSingleSample( movementDraw );
					movementDraw = LMH->m_movementDraws[ LMH->m_movementDrawsIdx++ % size_movementDraws];
				}

				newPose.composeFrom( oldPose, movementDraw ); // newPose = oldPose + movementDraw;

				// Compute acceptance probability:
				newPoseLikelihood = auxiliarComputeObservationLikelihood(PF_options, LMH,k,sf,&newPose);
				ratioLikLik = exp( newPoseLikelihood - LMH->m_maxLikelihood[k] );
				acceptanceProb = min( 1.0, ratioLikLik );

				if ( ratioLikLik > 1)
				{
					if (ratioLikLik>1.5)
					{
						statsWarningsAccProbAboveOne++;
						// DEBUG
						//printf("[pfAuxiliaryPFOptimal] Warning!! p(z|x)/p(z|x*)=%f\n",ratioLikLik);
					}
					LMH->m_maxLikelihood[k] = newPoseLikelihood; //  :'-( !!!
					acceptanceProb = 0;		// Keep searching!!
				}

				statsTrialsCount++;		// Stats

			} while ( acceptanceProb < randomGenerator.drawUniform(MIN_ACCEPT_UNIF_DISTRIB,0.999f) );

			statsTrialsSuccess++;		// Stats:

		}

		// Insert the new particle!:
		newParticles[i] = newPose;

		// And its weight:
		double	weightFact = LMH->m_pfAuxiliaryPFOptimal_estimatedProb[k] * PF_options.powFactor;

		// Add to historic record of log_w weights:
		LMH->m_log_w_metric_history.resize(M);
		LMH->m_log_w_metric_history[i][ currentPoseID] += weightFact;

		if (doResample)
				newParticlesWeight[i] = 0;
		else	newParticlesWeight[i] = LMH->m_particles[k].log_w + weightFact;

		// and its heritance:
		newParticlesDerivedFromIdx[i] = (unsigned int)k;

	} // for i



	// ---------------------------------------------------------------------------------
	// Substitute old by new particle set:
	//   Old are in "m_particles"
	//   New are in "newParticles", "newParticlesWeight","newParticlesDerivedFromIdx"
	// ---------------------------------------------------------------------------------
	N = newParticles.size();
	CLocalMetricHypothesis::CParticleList				newParticlesArray(N);
	CLocalMetricHypothesis::CParticleList::iterator		newPartIt,trgPartIt;

	// For efficiency, just copy the "CRBPFParticleData" from the old particle into the
	//  new one, but this can be done only once:
	std::vector<bool>	oldParticleAlreadyCopied(LMH->m_particles.size(),false);
	CLSLAMParticleData	*newPartData;

	for (newPartIt=newParticlesArray.begin(),i=0;newPartIt!=newParticlesArray.end();newPartIt++,i++)
	{
		// The weight:
		newPartIt->log_w = newParticlesWeight[i];

		// The data (CRBPFParticleData):
		if (!oldParticleAlreadyCopied[newParticlesDerivedFromIdx[i]])
		{
			// The first copy of this old particle:
			newPartData = LMH->m_particles[ newParticlesDerivedFromIdx[i] ].d;
            oldParticleAlreadyCopied[newParticlesDerivedFromIdx[i]] = true;
		}
		else
		{
			// Make a copy:
			newPartData = new CLSLAMParticleData( *LMH->m_particles[ newParticlesDerivedFromIdx[i] ].d );
		}

		newPartIt->d = newPartData;
	} // end for "newPartIt"


	// Now add the new robot pose to the paths: (this MUST be done after the above loop, separately):
	for (newPartIt=newParticlesArray.begin(),i=0;newPartIt!=newParticlesArray.end();newPartIt++,i++)
		newPartIt->d->robotPoses[ LMH->m_currentRobotPose ] = CPose3D( newParticles[i] );

	// Free those old m_particles not being copied into the new ones:
	for (i=0;i<LMH->m_particles.size();i++)
	{
		if (!oldParticleAlreadyCopied[i])
			delete LMH->m_particles[ i ].d;
		// And set all to NULL, so don't try to delete them below:
		LMH->m_particles[ i ].d = NULL;
	}

	// Copy into "m_particles":
	LMH->m_particles.resize( newParticlesArray.size() );
	for (newPartIt=newParticlesArray.begin(),trgPartIt=LMH->m_particles.begin(); newPartIt!=newParticlesArray.end(); newPartIt++, trgPartIt++ )
	{
		trgPartIt->log_w = newPartIt->log_w;
		trgPartIt->d = newPartIt->d;
		newPartIt->d = NULL;
	}

	// Free buffers:
	newParticles.clear();
	newParticlesArray.clear();
	newParticlesWeight.clear();
	newParticlesDerivedFromIdx.clear();

	double out_max_log_w;
	LMH->normalizeWeights( &out_max_log_w );	// Normalize weights:
	LMH->m_log_w += out_max_log_w;

#if 0
	printf("[REJ-SAMP.RATIO: \t%.03f%% \t %u out of %u with P(acep)>1]\n\n",
		100.0f*statsTrialsSuccess / (float)(max(1u,statsTrialsCount)),
		statsWarningsAccProbAboveOne,
		statsTrialsCount
		);
#endif


	MRPT_END
}
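The heart of the resampling loop above is the rejection test: a candidate pose drawn from the motion model is accepted with probability min(1, p(z|x_new)/max_lik), evaluated in log space. A stripped-down sketch of just that acceptance step follows; the function and parameter names are hypothetical, and the log-likelihoods are assumed to be computed elsewhere, as in the method above.

#include <algorithm>
#include <cmath>
#include <random>

// Accept a candidate pose whose observation log-likelihood is `logLikNew`,
// given the (approximate) maximum log-likelihood `logLikMax` for this particle,
// mirroring the do/while loop in the method above.
bool acceptCandidate(double logLikNew, double logLikMax, std::mt19937& rng)
{
	const double ratio          = std::exp(logLikNew - logLikMax);
	const double acceptanceProb = std::min(1.0, ratio);
	std::uniform_real_distribution<double> unif(0.0, 0.999);
	return acceptanceProb >= unif(rng);
}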