Example #1
// ------------------------------------------------------
//				VelodyneView main entry point
// ------------------------------------------------------
int VelodyneView(int argc, char** argv)
{
	// Parse arguments:
	if (!cmd.parse(argc, argv)) return 1;  // Parse error or "--help" shown: exit.

	if (!arg_nologo.isSet())
	{
		printf(" velodyne-view - Part of the MRPT\n");
		printf(
			" MRPT C++ Library: %s - Sources timestamp: %s\n",
			mrpt::system::MRPT_getVersion().c_str(),
			mrpt::system::MRPT_getCompilationDate().c_str());
	}

	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar;
	std::thread thHandle(thread_grabbing, std::ref(thrPar));

	// Wait until data stream starts so we can say for sure the sensor has been
	// initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do
	{
		CObservation::Ptr possiblyNewObs = std::atomic_load(&thrPar.new_obs);
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP)
			break;
		else
			std::this_thread::sleep_for(10ms);
	} while (!thrPar.quit);

	// Check error condition:
	if (thrPar.quit) return 0;

	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D win3D("Velodyne 3D view", 800, 600);

	// Allow rendering large number of points without decimation:
	mrpt::global_settings::OCTREE_RENDER_MAX_DENSITY_POINTS_PER_SQPIXEL(1);
	mrpt::global_settings::OCTREE_RENDER_MAX_POINTS_PER_NODE(1e7);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(0, 0, 0);
	mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points->setPointSize(2.5);

	{
		mrpt::opengl::COpenGLScene::Ptr& scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert(gl_points);
		scene->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());
		scene->insert(mrpt::opengl::stock_objects::CornerXYZ());

		win3D.unlockAccess3DScene();
		win3D.repaint();
	}

	CObservationVelodyneScan::Ptr last_obs;
	CObservationGPS::Ptr last_obs_gps;
	bool view_freeze = false;  // for pausing the view
	CObservationVelodyneScan::TGeneratePointCloudParameters pc_params;

	while (win3D.isOpen() && !thrPar.quit)
	{
		bool do_view_refresh = false;

		CObservationVelodyneScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		CObservationGPS::Ptr possiblyNewObsGps =
			std::atomic_load(&thrPar.new_obs_gps);

		if (possiblyNewObsGps &&
			possiblyNewObsGps->timestamp != INVALID_TIMESTAMP &&
			(!last_obs_gps ||
			 possiblyNewObsGps->timestamp != last_obs_gps->timestamp))
		{
			// It IS a new observation:
			last_obs_gps = std::atomic_load(&thrPar.new_obs_gps);

			std::string rmc_datum;
			if (last_obs_gps->has_RMC_datum)
			{
				rmc_datum = mrpt::format(
					"Lon=%.09f deg  Lat=%.09f deg  Valid?: '%c'\n",
					last_obs_gps->getMsgByClass<gnss::Message_NMEA_RMC>()
						.fields.longitude_degrees,
					last_obs_gps->getMsgByClass<gnss::Message_NMEA_RMC>()
						.fields.latitude_degrees,
					last_obs_gps->getMsgByClass<gnss::Message_NMEA_RMC>()
						.fields.validity_char);
			}
			else
				rmc_datum = "NO";

			win3D.get3DSceneAndLock();
			win3D.addTextMessage(
				5, 40,
				format(
					"POS. frame rx at %s, RMC=%s",
					mrpt::system::dateTimeLocalToString(last_obs_gps->timestamp)
						.c_str(),
					rmc_datum.c_str()),
				TColorf(1, 1, 1), "mono", 10.0, mrpt::opengl::NICE, 102);
			win3D.unlockAccess3DScene();
			do_view_refresh = true;
		}

		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP &&
			(!last_obs || possiblyNewObs->timestamp != last_obs->timestamp))
		{
			// It IS a new observation:
			last_obs = possiblyNewObs;

			if (!last_obs->scan_packets.empty())
			{
				win3D.get3DSceneAndLock();
				win3D.addTextMessage(
					5, 55,
					format(
						"LIDAR scan rx at %s with %u packets",
						mrpt::system::dateTimeLocalToString(last_obs->timestamp)
							.c_str(),
						static_cast<unsigned int>(
							last_obs->scan_packets.size())),
					TColorf(1, 1, 1), "mono", 10.0, mrpt::opengl::NICE, 103);
				win3D.unlockAccess3DScene();
				do_view_refresh = true;
			}

			// Update visualization ---------------------------------------

			// Show 3D points:
			if (!view_freeze)
			{
				last_obs->generatePointCloud(pc_params);

				CColouredPointsMap pntsMap;
				pntsMap.loadFromVelodyneScan(*last_obs);

				win3D.get3DSceneAndLock();
				gl_points->loadFromPointsMap(&pntsMap);
				win3D.unlockAccess3DScene();
			}

			// Estimated grabbing rate:
			win3D.get3DSceneAndLock();
			win3D.addTextMessage(
				-150, -20, format("%.02f Hz", thrPar.Hz), TColorf(1, 1, 1), 100,
				MRPT_GLUT_BITMAP_HELVETICA_18);
			win3D.unlockAccess3DScene();
			do_view_refresh = true;
		}  // end update visualization:

		// Force opengl repaint:
		if (do_view_refresh) win3D.repaint();

		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit() && thrPar.pushed_key == 0)
		{
			const int key = tolower(win3D.getPushedKey());

			switch (key)
			{
				// Some of the keys are processed in this thread:
				case 'o':
					win3D.setCameraZoom(win3D.getCameraZoom() * 1.2);
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom(win3D.getCameraZoom() / 1.2);
					win3D.repaint();
					break;
				case ' ':
					view_freeze = !view_freeze;
					break;
				case '1':
					pc_params.dualKeepLast = !pc_params.dualKeepLast;
					break;
				case '2':
					pc_params.dualKeepStrongest = !pc_params.dualKeepStrongest;
					break;
				// ...and the rest in the sensor thread:
				default:
					thrPar.pushed_key = key;
					break;
			};
		}

		win3D.get3DSceneAndLock();
		win3D.addTextMessage(
			5, 10,
			"'o'/'i'-zoom out/in, mouse: orbit 3D, spacebar: freeze, ESC: quit",
			TColorf(1, 1, 1), "mono", 10.0, mrpt::opengl::NICE, 110);
		win3D.addTextMessage(
			5, 25,
			mrpt::format(
				"'1'/'2': Toggle view dual last (%s)/strongest(%s) returns.",
				pc_params.dualKeepLast ? "ON" : "OFF",
				pc_params.dualKeepStrongest ? "ON" : "OFF"),
			TColorf(1, 1, 1), "mono", 10.0, mrpt::opengl::NICE, 111);
		win3D.unlockAccess3DScene();

		std::this_thread::sleep_for(50ms);
	}

	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	thHandle.join();
	cout << "Bye!\n";
	return 0;
}
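
The example above exchanges observations between the grabbing thread and the GUI thread through a small TThreadParam structure that is defined elsewhere in the sample, using the std::atomic_load()/std::atomic_store() overloads for shared_ptr. A minimal sketch of what that glue could look like, with field names inferred from their usage above:

#include <atomic>
#include <memory>
#include <mrpt/obs/CObservationGPS.h>
#include <mrpt/obs/CObservationVelodyneScan.h>

struct TThreadParam
{
	std::atomic_bool quit{false};
	std::atomic_int pushed_key{0};
	volatile double Hz{0};

	// Written by the grabber via std::atomic_store(), read by the GUI
	// thread via std::atomic_load() (the shared_ptr overloads in <memory>):
	mrpt::obs::CObservationVelodyneScan::Ptr new_obs;
	mrpt::obs::CObservationGPS::Ptr new_obs_gps;
};
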
Example #2
// ------------------------------------------------------
//				TestCamera3DFaceDetection
// ------------------------------------------------------
void TestCamera3DFaceDetection(CCameraSensor::Ptr cam)
{
	CDisplayWindow win("Live video");
	CDisplayWindow win2("FaceDetected");

	cout << "Close the window to exit." << endl;

	mrpt::gui::CDisplayWindow3D win3D("3D camera view", 800, 600);
	mrpt::gui::CDisplayWindow3D win3D2;

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(6.0);
	win3D.setCameraPointingToPoint(2.5, 0, 0);

	mrpt::opengl::COpenGLScene::Ptr& scene = win3D.get3DSceneAndLock();
	mrpt::opengl::COpenGLScene::Ptr scene2;

	mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points->setPointSize(4.5);

	mrpt::opengl::CPointCloudColoured::Ptr gl_points2 =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points2->setPointSize(4.5);

	// Create the Opengl object for the point cloud:
	scene->insert(gl_points);
	scene->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());
	scene->insert(mrpt::opengl::stock_objects::CornerXYZ());

	win3D.unlockAccess3DScene();

	if (showEachDetectedFace)
	{
		win3D2.setWindowTitle("3D Face detected");
		win3D2.resize(400, 300);

		win3D2.setCameraAzimuthDeg(140);
		win3D2.setCameraElevationDeg(20);
		win3D2.setCameraZoom(6.0);
		win3D2.setCameraPointingToPoint(2.5, 0, 0);

		scene2 = win3D2.get3DSceneAndLock();

		scene2->insert(gl_points2);
		scene2->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());

		win3D2.unlockAccess3DScene();
	}

	double counter = 0;
	mrpt::utils::CTicTac tictac;

	CVectorDouble fps;

	while (win.isOpen())
	{
		if (!counter) tictac.Tic();

		CObservation3DRangeScan::Ptr o;

		try
		{
			o = std::dynamic_pointer_cast<CObservation3DRangeScan>(
				cam->getNextFrame());
		}
		catch (CExceptionEOF&)
		{
			break;
		}
		ASSERT_(o);

		vector_detectable_object detected;

		faceDetector.detectObjects(o, detected);

		if (detected.size() > 0)
		{
			for (unsigned int i = 0; i < detected.size(); i++)
			{
				ASSERT_(IS_CLASS(detected[i], CDetectable3D));
				CDetectable3D::Ptr obj =
					std::dynamic_pointer_cast<CDetectable3D>(detected[i]);

				if (showEachDetectedFace)
				{
					CObservation3DRangeScan face;
					o->getZoneAsObs(
						face, obj->m_y, obj->m_y + obj->m_height, obj->m_x,
						obj->m_x + obj->m_width);
					win2.showImage(face.intensityImage);

					if (o->hasPoints3D)
					{
						win3D2.get3DSceneAndLock();

						CColouredPointsMap pntsMap;

						if (!o->hasConfidenceImage)
						{
							pntsMap.colorScheme.scheme =
								CColouredPointsMap::cmFromIntensityImage;
							pntsMap.loadFromRangeScan(face);
						}
						else
						{
							vector<float> xs, ys, zs;
							unsigned int i = 0;
							for (unsigned int j = 0;
								 j < face.confidenceImage.getHeight(); j++)
								for (unsigned int k = 0;
									 k < face.confidenceImage.getWidth();
									 k++, i++)
								{
									unsigned char c =
										*(face.confidenceImage.get_unsafe(
											k, j, 0));
									if (c > faceDetector.m_options
												.confidenceThreshold)
									{
										xs.push_back(face.points3D_x[i]);
										ys.push_back(face.points3D_y[i]);
										zs.push_back(face.points3D_z[i]);
									}
								}

							pntsMap.setAllPoints(xs, ys, zs);
						}

						gl_points2->loadFromPointsMap(&pntsMap);

						win3D2.unlockAccess3DScene();
						win3D2.repaint();
					}
				}

				// Draw a red rectangle around the detected face:
				o->intensityImage.rectangle(
					obj->m_x, obj->m_y, obj->m_x + obj->m_width,
					obj->m_y + obj->m_height, TColor(255, 0, 0));
			}
		}

		win.showImage(o->intensityImage);

		win3D.get3DSceneAndLock();
		CColouredPointsMap pntsMap;
		pntsMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage;
		pntsMap.loadFromRangeScan(*o);

		gl_points->loadFromPointsMap(&pntsMap);
		win3D.unlockAccess3DScene();
		win3D.repaint();

		if (++counter == 10)
		{
			double t = tictac.Tac();
			cout << "Frame Rate: " << counter / t << " fps" << endl;
			fps.push_back(counter / t);
			counter = 0;
		}
		std::this_thread::sleep_for(2ms);
	}

	cout << "Fps mean: " << fps.sumAll() / fps.size() << endl;

	faceDetector.experimental_showMeasurements();

	cout << "Closing..." << endl;
}
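
The confidence-filtering loop above relies on CObservation3DRangeScan storing points3D_x/y/z row-major and parallel to the confidence image, so pixel (k, j) maps to the linear index i = j*width + k. The mapping reduced to an illustrative helper (not part of the sample):

#include <cstddef>

inline std::size_t pixelToPointIndex(
	std::size_t row, std::size_t col, std::size_t imageWidth)
{
	return row * imageWidth + col;
}
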
Example #3
// This method is called in any case for displaying a laser scan.
//  We keep an internal list of recent scans so they don't vanish instantaneously.
void CScanAnimation::BuildMapAndRefresh(CSensoryFrame *sf)
{
	WX_START_TRY

	// Preprocess: make sure 3D observations are ready:
	std::vector<CObservation3DRangeScanPtr> obs3D_to_clear;
	for (CSensoryFrame::iterator it=sf->begin();it!=sf->end();++it)
	{
		(*it)->load();
		// force generate 3D point clouds:
		if (IS_CLASS(*it, CObservation3DRangeScan))
		{
			CObservation3DRangeScanPtr o= CObservation3DRangeScanPtr(*it);
			if (o->hasRangeImage && !o->hasPoints3D)
			{
				o->project3DPointsFromDepthImage();
				obs3D_to_clear.push_back(o);
			}
		}
	}


	// Mix?
	if (!m_mixlasers)
	{
		// if not, just clear all old objects:
		for (TListGlObjects::iterator it = m_gl_objects.begin();it!=m_gl_objects.end();++it)
		{
			TRenderObject &ro = it->second;
			m_plot3D->m_openGLScene->removeObject(ro.obj);  // Remove from the opengl viewport
		}
		m_gl_objects.clear();
	}

	// Insert new scans:
	mrpt::system::TTimeStamp  	tim_last = INVALID_TIMESTAMP;
	bool						wereScans = false;
	for (CSensoryFrame::iterator it=sf->begin();it!=sf->end();++it)
	{
		if (IS_CLASS(*it,CObservation2DRangeScan))
		{
			CObservation2DRangeScanPtr obs = CObservation2DRangeScanPtr(*it);
			wereScans = true;
			if (tim_last==INVALID_TIMESTAMP || tim_last<obs->timestamp)
				tim_last = obs->timestamp;

			// Already in the map with the same sensor label?
			TListGlObjects::iterator it_gl = m_gl_objects.find(obs->sensorLabel);
			if (it_gl!=m_gl_objects.end())
			{
				// Update existing object:
				TRenderObject &ro = it_gl->second;
				CPlanarLaserScanPtr(ro.obj)->setScan(*obs);
				ro.timestamp = obs->timestamp;
			}
			else
			{
				// Create object:
				CPlanarLaserScanPtr gl_obj = CPlanarLaserScan::Create();
				gl_obj->setScan(*obs);

				TRenderObject ro;
				ro.obj = gl_obj;
				ro.timestamp = obs->timestamp;
				m_gl_objects[obs->sensorLabel]=ro;
				m_plot3D->m_openGLScene->insert( gl_obj );
			}
		}
		else
		if (IS_CLASS(*it,CObservation3DRangeScan))
		{
			CObservation3DRangeScanPtr obs = CObservation3DRangeScanPtr(*it);
			wereScans = true;
			if (tim_last==INVALID_TIMESTAMP || tim_last<obs->timestamp)
				tim_last = obs->timestamp;

			CColouredPointsMap  pointMap;
			pointMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage;
			pointMap.insertionOptions.minDistBetweenLaserPoints = 0;

			pointMap.insertObservation( obs.pointer() );

			// Already in the map with the same sensor label?
			TListGlObjects::iterator it_gl = m_gl_objects.find(obs->sensorLabel);
			if (it_gl!=m_gl_objects.end())
			{
				// Update existing object:
				TRenderObject &ro = it_gl->second;
				CPointCloudColouredPtr gl_obj = CPointCloudColouredPtr(ro.obj);
				gl_obj->loadFromPointsMap(&pointMap);
				ro.timestamp = obs->timestamp;
			}
			else
			{
				// Create object:
				CPointCloudColouredPtr gl_obj = CPointCloudColoured::Create();
				gl_obj->setPointSize(3.0);
				gl_obj->loadFromPointsMap(&pointMap);

				TRenderObject ro;
				ro.obj = gl_obj;
				ro.timestamp = obs->timestamp;
				m_gl_objects[obs->sensorLabel]=ro;
				m_plot3D->m_openGLScene->insert( gl_obj );
			}


		}
	}

	// Check what observations are too old and must be deleted:
	const double largest_period = 0.2;
	vector_string lst_to_delete;
	for (TListGlObjects::iterator it=m_gl_objects.begin();it!=m_gl_objects.end();++it)
	{
		TRenderObject &ro = it->second;

		if ((tim_last==INVALID_TIMESTAMP && wereScans)  // Scans without timestamps
			||
			(tim_last!=INVALID_TIMESTAMP && fabs(mrpt::system::timeDifference( ro.timestamp, tim_last)) > largest_period ))
		{
			lst_to_delete.push_back(it->first);
		}
	}

	// Remove too old observations:
	for (vector_string::iterator s=lst_to_delete.begin();s!=lst_to_delete.end();++s)
	{
		TRenderObject &ro = m_gl_objects[*s];

		m_plot3D->m_openGLScene->removeObject(ro.obj);  // Remove from the opengl viewport
		m_gl_objects.erase(*s); // and from my list
	}

	// Force refresh view:
	m_plot3D->Refresh();

	// Post-process: unload 3D observations.
	for (CSensoryFrame::iterator it=sf->begin();it!=sf->end();++it)
		(*it)->unload();
	for (size_t i=0;i<obs3D_to_clear.size();i++)
	{
		obs3D_to_clear[i]->resizePoints3DVectors(0);
		obs3D_to_clear[i]->hasPoints3D = false;
	}


	WX_END_TRY
}
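
The expiry pass above follows a simple pattern: each render object remembers the timestamp of its last update, and anything further than largest_period from the newest scan is removed from the scene. The same idea factored into a standalone sketch (names and the plain-double timestamps are illustrative):

#include <cmath>
#include <map>
#include <string>
#include <vector>

// label -> timestamp (in seconds) of the last update of each render object:
using TLastUpdate = std::map<std::string, double>;

std::vector<std::string> findExpired(
	const TLastUpdate& objs, double tim_newest, double largest_period = 0.2)
{
	std::vector<std::string> to_delete;
	for (const auto& kv : objs)
		if (std::fabs(kv.second - tim_newest) > largest_period)
			to_delete.push_back(kv.first);
	return to_delete;
}
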
Example #4
// ------------------------------------------------------
//				Test_SwissRanger
// ------------------------------------------------------
void Test_SwissRanger()
{
	CSwissRanger3DCamera cam;

	// Set params:
	cam.setOpenFromUSB(true);

	cam.setSave3D(true);
	cam.setSaveRangeImage(true);
	cam.setSaveIntensityImage(true);
	cam.setSaveConfidenceImage(false);

	// cam.enablePreviewWindow(true);

	// Open:
	cam.initialize();

	if (cam.isOpen())
		cout << "[Test_SwissRanger] Camera open, serial #"
			 << cam.getCameraSerialNumber() << " resolution: " << cam.cols()
			 << "x" << cam.rows() << " max. range: " << cam.getMaxRange()
			 << endl;

	const double aspect_ratio = cam.rows() / double(cam.cols());

	{
		std::string ver;
		cam.getMesaLibVersion(ver);
		cout << "[Test_SwissRanger] Version: " << ver << "\n";
	}

	CObservation3DRangeScan obs;
	bool there_is_obs = true, hard_error = false;

	mrpt::gui::CDisplayWindow3D win3D("3D camera view", 800, 600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(6.0);
	win3D.setCameraPointingToPoint(2.5, 0, 0);

	// Alternative: a plain (uncolored) mrpt::opengl::CPointCloud would also
	// work here; the colored variant lets points take their colors from the
	// intensity image.
	mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points->setPointSize(4.5);

	mrpt::opengl::CTexturedPlane::Ptr gl_img_range =
		mrpt::make_aligned_shared<mrpt::opengl::CTexturedPlane>(
			0.5, -0.5, -0.5 * aspect_ratio, 0.5 * aspect_ratio);
	mrpt::opengl::CTexturedPlane::Ptr gl_img_intensity =
		mrpt::make_aligned_shared<mrpt::opengl::CTexturedPlane>(
			0.5, -0.5, -0.5 * aspect_ratio, 0.5 * aspect_ratio);
	mrpt::opengl::CTexturedPlane::Ptr gl_img_intensity_rect =
		mrpt::make_aligned_shared<mrpt::opengl::CTexturedPlane>(
			0.5, -0.5, -0.5 * aspect_ratio, 0.5 * aspect_ratio);

	{
		mrpt::opengl::COpenGLScene::Ptr& scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert(gl_points);
		scene->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());
		scene->insert(mrpt::opengl::stock_objects::CornerXYZ());

		const int VW_WIDTH = 200;
		const int VW_HEIGHT = 150;
		const int VW_GAP = 10;

		// Create the Opengl objects for the planar images, as textured planes,
		// each in a separate viewport:
		win3D.addTextMessage(
			30, -10 - 1 * (VW_GAP + VW_HEIGHT), "Range data", TColorf(1, 1, 1),
			1, MRPT_GLUT_BITMAP_HELVETICA_12);
		opengl::COpenGLViewport::Ptr viewRange =
			scene->createViewport("view2d_range");
		scene->insert(gl_img_range, "view2d_range");
		viewRange->setViewportPosition(
			5, -10 - 1 * (VW_GAP + VW_HEIGHT), VW_WIDTH, VW_HEIGHT);
		viewRange->setTransparent(true);
		viewRange->getCamera().setOrthogonal(true);
		viewRange->getCamera().setAzimuthDegrees(90);
		viewRange->getCamera().setElevationDegrees(90);
		viewRange->getCamera().setZoomDistance(1.0);

		win3D.addTextMessage(
			30, -10 - 2 * (VW_GAP + VW_HEIGHT), "Intensity data",
			TColorf(1, 1, 1), 2, MRPT_GLUT_BITMAP_HELVETICA_12);
		opengl::COpenGLViewport::Ptr viewInt =
			scene->createViewport("view2d_int");
		scene->insert(gl_img_intensity, "view2d_int");
		viewInt->setViewportPosition(
			5, -10 - 2 * (VW_GAP + VW_HEIGHT), VW_WIDTH, VW_HEIGHT);
		viewInt->setTransparent(true);
		viewInt->getCamera().setOrthogonal(true);
		viewInt->getCamera().setAzimuthDegrees(90);
		viewInt->getCamera().setElevationDegrees(90);
		viewInt->getCamera().setZoomDistance(1.0);

		win3D.addTextMessage(
			30, -10 - 3 * (VW_GAP + VW_HEIGHT), "Intensity data (undistorted)",
			TColorf(1, 1, 1), 3, MRPT_GLUT_BITMAP_HELVETICA_12);
		opengl::COpenGLViewport::Ptr viewIntRect =
			scene->createViewport("view2d_intrect");
		scene->insert(gl_img_intensity_rect, "view2d_intrect");
		viewIntRect->setViewportPosition(
			5, -10 - 3 * (VW_GAP + VW_HEIGHT), VW_WIDTH, VW_HEIGHT);
		viewIntRect->setTransparent(true);
		viewIntRect->getCamera().setOrthogonal(true);
		viewIntRect->getCamera().setAzimuthDegrees(90);
		viewIntRect->getCamera().setElevationDegrees(90);
		viewIntRect->getCamera().setZoomDistance(1.0);

		win3D.unlockAccess3DScene();
		win3D.repaint();
	}

	CTicTac tictac;
	size_t nImgs = 0;

	bool endLoop = false;

	while (there_is_obs && !endLoop && win3D.isOpen())
	{
		// Grab new observation from the camera:
		cam.getNextObservation(obs, there_is_obs, hard_error);

		// Show ranges as 2D:
		if (there_is_obs && obs.hasRangeImage)
		{
			mrpt::img::CImage img;
			// Normalize the image
			CMatrixFloat range2D = obs.rangeImage;
			range2D *= 1.0 / cam.getMaxRange();
			img.setFromMatrix(range2D);

			win3D.get3DSceneAndLock();
			gl_img_range->assignImage_fast(img);
			win3D.unlockAccess3DScene();
		}

		// Show intensity image:
		if (there_is_obs && obs.hasIntensityImage)
		{
			win3D.get3DSceneAndLock();
			gl_img_intensity->assignImage(obs.intensityImage);

			CImage undistortImg;
			obs.intensityImage.undistort(undistortImg, obs.cameraParams);
			gl_img_intensity_rect->assignImage(undistortImg);
			win3D.unlockAccess3DScene();
		}

		// Show 3D points:
		if (there_is_obs && obs.hasPoints3D)
		{
			// mrpt::maps::CSimplePointsMap  pntsMap;
			CColouredPointsMap pntsMap;
			pntsMap.colorScheme.scheme =
				CColouredPointsMap::cmFromIntensityImage;
			pntsMap.loadFromRangeScan(obs);

			win3D.get3DSceneAndLock();
			gl_points->loadFromPointsMap(&pntsMap);
			win3D.unlockAccess3DScene();
			win3D.repaint();
		}

		nImgs++;
		if (nImgs > 10)
		{
			win3D.get3DSceneAndLock();
			win3D.addTextMessage(
				0.01, 0.01, format("%.02f Hz", nImgs / tictac.Tac()),
				TColorf(0, 1, 1), 100, MRPT_GLUT_BITMAP_HELVETICA_12);
			win3D.unlockAccess3DScene();
			nImgs = 0;
			tictac.Tic();
		}

		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit())
		{
			const int key = tolower(win3D.getPushedKey());
			// cout << "key: " << key << endl;

			switch (key)
			{
				case 'h':
					cam.enableImageHistEqualization(
						!cam.isEnabledImageHistEqualization());
					break;
				case 'g':
					cam.enableConvGray(!cam.isEnabledConvGray());
					break;
				case 'd':
					cam.enableDenoiseANF(!cam.isEnabledDenoiseANF());
					break;
				case 'f':
					cam.enableMedianFilter(!cam.isEnabledMedianFilter());
					break;
				case 27:
					endLoop = true;
					break;
			}
		}

		win3D.get3DSceneAndLock();
		win3D.addTextMessage(
			0.08, 0.02,
			format(
				"Keyboard switches: H (hist.equal: %s) | G (convGray: %s) | D "
				"(denoise: %s) | F (medianFilter: %s)",
				cam.isEnabledImageHistEqualization() ? "ON" : "OFF",
				cam.isEnabledConvGray() ? "ON" : "OFF",
				cam.isEnabledDenoiseANF() ? "ON" : "OFF",
				cam.isEnabledMedianFilter() ? "ON" : "OFF"),
			TColorf(0, 0, 1), 110, MRPT_GLUT_BITMAP_HELVETICA_18);
		win3D.unlockAccess3DScene();

		std::this_thread::sleep_for(1ms);
	}
}
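
The 2D range view above works by rescaling metric depths into [0,1] before handing the matrix to CImage::setFromMatrix(), which interprets values as grey levels. The normalization in isolation, as a plain sketch (the sample itself operates on a CMatrixFloat):

#include <algorithm>
#include <vector>

// Rescale metric ranges into [0,1] grey levels, clamping returns beyond
// the sensor's maximum range:
void normalizeRanges(std::vector<float>& ranges, float maxRange)
{
	const float inv = (maxRange > 0.0f) ? 1.0f / maxRange : 0.0f;
	for (float& r : ranges) r = std::min(1.0f, r * inv);
}
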
Example #5
// ------------------------------------------------------
//				Test_Kinect
// ------------------------------------------------------
void Test_Kinect()
{
	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar;
	std::thread thHandle = std::thread(thread_grabbing, std::ref(thrPar));

	// Wait until data stream starts so we can say for sure the sensor has been
	// initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do
	{
		CObservation3DRangeScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP)
			break;
		else
			std::this_thread::sleep_for(10ms);
	} while (!thrPar.quit);

	// Check error condition:
	if (thrPar.quit) return;

	// Feature tracking variables:
	CFeatureList trackedFeats;
	unsigned int step_num = 0;

	bool SHOW_FEAT_IDS = true;
	bool SHOW_RESPONSES = true;

	CGenericFeatureTrackerAutoPtr tracker;

	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr(new CFeatureTracker_KL);
	tracker->enableTimeLogger(true);  // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// To see all the existing params and documentation, see
	// mrpt::vision::CGenericFeatureTracker
	//  http://reference.mrpt.org/devel/structmrpt_1_1vision_1_1_c_generic_feature_tracker.html
	tracker->extra_params["add_new_features"] =
		1;  // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"] = 25;
	tracker->extra_params["add_new_feat_max_features"] = 150;
	tracker->extra_params["add_new_feat_patch_size"] = 21;

	tracker->extra_params["minimum_KLT_response_to_add"] = 40;
	tracker->extra_params["check_KLT_response_every"] =
		5;  // Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"] =
		25;  // Re-check the KLT-response to assure features are in good points.

	tracker->extra_params["update_patches_every"] = 0;  // Update patches

	// Specific params for "CFeatureTracker_KL"
	tracker->extra_params["window_width"] = 25;
	tracker->extra_params["window_height"] = 25;

	// Global points map:
	CColouredPointsMap globalPtsMap;
	globalPtsMap.colorScheme.scheme =
		CColouredPointsMap::cmFromIntensityImage;  // Take point colors from
												   // the RGB+D observations.
	// Alternative: CColouredPointsMap::cmFromHeightRelativeToSensorGray

	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D win3D("kinect-3d-slam 3D view", 800, 600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(2.5, 0, 0);

	mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points->setPointSize(2.5);

	mrpt::opengl::CSetOfObjects::Ptr gl_curFeats =
		mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();
	mrpt::opengl::CSetOfObjects::Ptr gl_keyframes =
		mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();

	mrpt::opengl::CPointCloudColoured::Ptr gl_points_map =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points_map->setPointSize(2.0);

	const double aspect_ratio =
		480.0 / 640.0;  // kinect.rows() / double( kinect.cols() );

	mrpt::opengl::CSetOfObjects::Ptr gl_cur_cam_corner =
		mrpt::opengl::stock_objects::CornerXYZSimple(0.4f, 4);

	opengl::COpenGLViewport::Ptr viewInt;
	{
		mrpt::opengl::COpenGLScene::Ptr& scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert(gl_points_map);
		scene->insert(gl_points);
		scene->insert(gl_curFeats);
		scene->insert(gl_keyframes);
		scene->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());

		scene->insert(gl_cur_cam_corner);

		const int VW_WIDTH =
			350;  // Size of the viewport into the window, in pixel units.
		const int VW_HEIGHT = aspect_ratio * VW_WIDTH;

		// Create the Opengl objects for the planar images each in a separate
		// viewport:
		viewInt = scene->createViewport("view2d_int");
		viewInt->setViewportPosition(2, 2, VW_WIDTH, VW_HEIGHT);
		viewInt->setTransparent(true);

		win3D.unlockAccess3DScene();
		win3D.repaint();
	}

	CImage previous_image;

	map<TFeatureID, TPoint3D> lastVisibleFeats;
	std::vector<TPose3D>
		camera_key_frames_path;  // The 6D path of the Kinect camera.
	CPose3D
		currentCamPose_wrt_last;  // wrt last pose in "camera_key_frames_path"

	bool gl_keyframes_must_refresh =
		true;  // Need to update gl_keyframes from camera_key_frames_path??
	CObservation3DRangeScan::Ptr last_obs;
	string str_status, str_status2;

	while (win3D.isOpen() && !thrPar.quit)
	{
		CObservation3DRangeScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP &&
			(!last_obs || possiblyNewObs->timestamp != last_obs->timestamp))
		{
			// It IS a new observation:
			last_obs = possiblyNewObs;

			// Feature tracking -------------------------------------------
			ASSERT_(last_obs->hasIntensityImage);

			CImage theImg;  // The grabbed image:
			theImg = last_obs->intensityImage;

			// Do tracking:
			if (step_num > 1)  // we need "previous_image" to be valid.
			{
				tracker->trackFeatures(previous_image, theImg, trackedFeats);

				// Remove those now out of the image plane:
				CFeatureList::iterator itFeat = trackedFeats.begin();
				while (itFeat != trackedFeats.end())
				{
					const TFeatureTrackStatus status = (*itFeat)->track_status;
					bool eras =
						(status_TRACKED != status && status_IDLE != status);
					if (!eras)
					{
						// Also, check if it's too close to the image border:
						const float x = (*itFeat)->x;
						const float y = (*itFeat)->y;
						static const float MIN_DIST_MARGIN_TO_STOP_TRACKING =
							10;
						if (x < MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							y < MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							x > (last_obs->cameraParamsIntensity.ncols -
								 MIN_DIST_MARGIN_TO_STOP_TRACKING) ||
							y > (last_obs->cameraParamsIntensity.nrows -
								 MIN_DIST_MARGIN_TO_STOP_TRACKING))
						{
							eras = true;
						}
					}
					if (eras)  // Erase or keep?
						itFeat = trackedFeats.erase(itFeat);
					else
						++itFeat;
				}
			}

			// Create list of 3D features in space, wrt current camera pose:
			// --------------------------------------------------------------------
			map<TFeatureID, TPoint3D> curVisibleFeats;
			for (CFeatureList::iterator itFeat = trackedFeats.begin();
				 itFeat != trackedFeats.end(); ++itFeat)
			{
				// Pixel coordinates in the intensity image:
				const int int_x = (*itFeat)->x;
				const int int_y = (*itFeat)->y;

				// Convert to pixel coords in the range image:
				//  APPROXIMATION: Assume coordinates are equal (that's not
				//  exact!!)
				const int x = int_x;
				const int y = int_y;

				// Does this (x,y) have valid range data?
				const float d = last_obs->rangeImage(y, x);
				if (d > 0.05 && d < 10.0)
				{
					ASSERT_(
						size_t(
							last_obs->rangeImage.cols() *
							last_obs->rangeImage.rows()) ==
						last_obs->points3D_x.size());
					const size_t nPt = last_obs->rangeImage.cols() * y + x;
					curVisibleFeats[(*itFeat)->ID] = TPoint3D(
						last_obs->points3D_x[nPt], last_obs->points3D_y[nPt],
						last_obs->points3D_z[nPt]);
				}
			}

			// Load local points map from 3D points + color:
			CColouredPointsMap localPntsMap;
			localPntsMap.colorScheme.scheme =
				CColouredPointsMap::cmFromIntensityImage;
			localPntsMap.loadFromRangeScan(*last_obs);

			// Estimate our current camera pose from feature2feature matching:
			// --------------------------------------------------------------------
			if (!lastVisibleFeats.empty())
			{
				TMatchingPairList corrs;  // pairs of correspondences

				for (map<TFeatureID, TPoint3D>::const_iterator itCur =
						 curVisibleFeats.begin();
					 itCur != curVisibleFeats.end(); ++itCur)
				{
					map<TFeatureID, TPoint3D>::const_iterator itFound =
						lastVisibleFeats.find(itCur->first);
					if (itFound != lastVisibleFeats.end())
					{
						corrs.push_back(
							TMatchingPair(
								itFound->first, itCur->first, itFound->second.x,
								itFound->second.y, itFound->second.z,
								itCur->second.x, itCur->second.y,
								itCur->second.z));
					}
				}

				if (corrs.size() >= 3)
				{
					// Find matchings:
					mrpt::tfest::TSE3RobustParams params;
					params.ransac_minSetSize = 3;
					params.ransac_maxSetSizePct = 6.0 / corrs.size();

					mrpt::tfest::TSE3RobustResult results;
					bool register_ok = false;
					try
					{
						mrpt::tfest::se3_l2_robust(corrs, params, results);
						register_ok = true;
					}
					catch (std::exception&)
					{
						/* Cannot find a minimum number of matches,
						 * inconsistent parameters due to a very reduced
						 * number of matches, etc. */
					}

					const CPose3D relativePose =
						CPose3D(results.transformation);

					str_status = mrpt::format(
						"%d corrs | inliers: %d | rel.pose: %s ",
						int(corrs.size()), int(results.inliers_idx.size()),
						relativePose.asString().c_str());
					str_status2 = string(
						results.inliers_idx.size() == 0
							? "LOST! Please, press 'r' to restart"
							: "");

					if (register_ok && std::abs(results.scale - 1.0) < 0.1)
					{
						// Seems a good match:
						if ((relativePose.norm() > KEYFRAMES_MIN_DISTANCE ||
							 std::abs(relativePose.yaw()) > KEYFRAMES_MIN_ANG ||
							 std::abs(relativePose.pitch()) >
								 KEYFRAMES_MIN_ANG ||
							 std::abs(relativePose.roll()) > KEYFRAMES_MIN_ANG))
						{
							// Accept this as a new key-frame pose ------------
							// Append new global pose of this key-frame:

							const CPose3D new_keyframe_global =
								CPose3D(*camera_key_frames_path.rbegin()) +
								relativePose;

							camera_key_frames_path.push_back(
								new_keyframe_global.asTPose());

							gl_keyframes_must_refresh = true;

							currentCamPose_wrt_last =
								CPose3D();  // It's (0,0,0) since the last
							// key-frame is the current pose!
							lastVisibleFeats = curVisibleFeats;

							cout << "Adding new key-frame: pose="
								 << new_keyframe_global << endl;

							// Update global map: append another map at a given
							// position:
							globalPtsMap.insertObservation(
								last_obs.get(), &new_keyframe_global);
							win3D.get3DSceneAndLock();
							gl_points_map->loadFromPointsMap(&globalPtsMap);
							win3D.unlockAccess3DScene();
						}
						else
						{
							currentCamPose_wrt_last = relativePose;
							// cout << "cur pose: " << currentCamPose_wrt_last
							// << endl;
						}
					}
				}
			}

			if (camera_key_frames_path.empty() || lastVisibleFeats.empty())
			{
				// First iteration:
				camera_key_frames_path.clear();
				camera_key_frames_path.push_back(TPose3D(0, 0, 0, 0, 0, 0));
				gl_keyframes_must_refresh = true;
				lastVisibleFeats = curVisibleFeats;

				// Update global map:
				globalPtsMap.clear();
				globalPtsMap.insertObservation(last_obs.get());

				win3D.get3DSceneAndLock();
				gl_points_map->loadFromPointsMap(&globalPtsMap);
				win3D.unlockAccess3DScene();
			}

			// Save the image for the next step:
			previous_image = theImg;

			// Draw marks on the RGB image:
			theImg.selectTextFont("10x20");
			{  // Tracked feats:
				theImg.drawFeatures(
					trackedFeats, TColor(0, 0, 255), SHOW_FEAT_IDS,
					SHOW_RESPONSES);
				theImg.textOut(
					3, 22,
					format("# feats: %u", (unsigned int)trackedFeats.size()),
					TColor(200, 20, 20));
			}

			// Update visualization ---------------------------------------

			// Show intensity image
			win3D.get3DSceneAndLock();
			viewInt->setImageView(theImg);
			win3D.unlockAccess3DScene();

			// Show 3D points & current visible feats, at the current camera 3D
			// pose "currentCamPose_wrt_last"
			// ---------------------------------------------------------------------
			if (last_obs->hasPoints3D)
			{
				const CPose3D curGlobalPose =
					CPose3D(*camera_key_frames_path.rbegin()) +
					currentCamPose_wrt_last;
				win3D.get3DSceneAndLock();
				// All 3D points:
				gl_points->loadFromPointsMap(&localPntsMap);
				gl_points->setPose(curGlobalPose);
				gl_cur_cam_corner->setPose(curGlobalPose);

				// Current visual landmarks:
				gl_curFeats->clear();
				for (map<TFeatureID, TPoint3D>::const_iterator it =
						 curVisibleFeats.begin();
					 it != curVisibleFeats.end(); ++it)
				{
					const double D = 0.02;
					mrpt::opengl::CBox::Ptr box =
						mrpt::make_aligned_shared<mrpt::opengl::CBox>(
							TPoint3D(-D, -D, -D), TPoint3D(D, D, D));
					box->setWireframe(true);
					box->setName(format("%d", int(it->first)));
					box->enableShowName(true);
					box->setLocation(it->second);
					gl_curFeats->insert(box);
				}
				gl_curFeats->setPose(curGlobalPose);

				win3D.unlockAccess3DScene();
				win3D.repaint();
			}

			win3D.get3DSceneAndLock();
			win3D.addTextMessage(
				-100, -20, format("%.02f Hz", thrPar.Hz), TColorf(0, 1, 1), 100,
				MRPT_GLUT_BITMAP_HELVETICA_18);
			win3D.unlockAccess3DScene();

			win3D.repaint();

			step_num++;

		}  // end update visualization:

		if (gl_keyframes_must_refresh)
		{
			gl_keyframes_must_refresh = false;
			// cout << "Updating gl_keyframes with " <<
			// camera_key_frames_path.size() << " frames.\n";
			win3D.get3DSceneAndLock();
			gl_keyframes->clear();
			for (size_t i = 0; i < camera_key_frames_path.size(); i++)
			{
				CSetOfObjects::Ptr obj =
					mrpt::opengl::stock_objects::CornerXYZSimple(0.3f, 3);
				obj->setPose(camera_key_frames_path[i]);
				gl_keyframes->insert(obj);
			}
			win3D.unlockAccess3DScene();
		}

		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit() && thrPar.pushed_key == 0)
		{
			const int key = tolower(win3D.getPushedKey());

			switch (key)
			{
				// Some of the keys are processed in this thread:
				case 'r':
					lastVisibleFeats.clear();
					camera_key_frames_path.clear();
					gl_keyframes_must_refresh = true;
					globalPtsMap.clear();
					win3D.get3DSceneAndLock();
					gl_points_map->loadFromPointsMap(&globalPtsMap);
					win3D.unlockAccess3DScene();

					break;
				case 's':
				{
					const std::string s = "point_cloud.txt";
					cout << "Dumping 3D point-cloud to: " << s << endl;
					globalPtsMap.save3D_to_text_file(s);
					break;
				}
				case 'o':
					win3D.setCameraZoom(win3D.getCameraZoom() * 1.2);
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom(win3D.getCameraZoom() / 1.2);
					win3D.repaint();
					break;
				// ...and the rest in the kinect thread:
				default:
					thrPar.pushed_key = key;
					break;
			};
		}

		win3D.get3DSceneAndLock();
		win3D.addTextMessage(
			2, -30, format(
						"'s':save point cloud, 'r': reset, 'o'/'i': zoom "
						"out/in, mouse: orbit 3D, ESC: quit"),
			TColorf(1, 1, 1), 110, MRPT_GLUT_BITMAP_HELVETICA_12);
		win3D.addTextMessage(
			2, -50, str_status, TColorf(1, 1, 1), 111,
			MRPT_GLUT_BITMAP_HELVETICA_12);
		win3D.addTextMessage(
			2, -70, str_status2, TColorf(1, 1, 1), 112,
			MRPT_GLUT_BITMAP_HELVETICA_18);
		win3D.unlockAccess3DScene();

		std::this_thread::sleep_for(1ms);
	}

	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	thHandle.join();
	cout << "Bye!\n";
}
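
The keyframe decision above boils down to a single gate: accept a new keyframe when the RANSAC-estimated relative pose has moved far enough in translation or in any of the three Euler angles. Factored out as an illustrative helper (KEYFRAMES_MIN_DISTANCE and KEYFRAMES_MIN_ANG are the sample's own constants):

#include <cmath>
#include <mrpt/poses/CPose3D.h>

bool isNewKeyframe(
	const mrpt::poses::CPose3D& relativePose, double minDist, double minAng)
{
	// norm() is the length of the translation part of the pose:
	return relativePose.norm() > minDist ||
		   std::abs(relativePose.yaw()) > minAng ||
		   std::abs(relativePose.pitch()) > minAng ||
		   std::abs(relativePose.roll()) > minAng;
}
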
Example #6
// ------------------------------------------------------
//				Test_Kinect
// ------------------------------------------------------
void Test_Kinect()
{
	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar;
	mrpt::system::TThreadHandle thHandle =
		mrpt::system::createThreadRef(thread_grabbing, thrPar);

	// Wait until data stream starts so we can say for sure the sensor has been initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do {
		CObservation3DRangeScanPtr possiblyNewObs = thrPar.new_obs.get();
		if (possiblyNewObs && possiblyNewObs->timestamp!=INVALID_TIMESTAMP)
				break;
		else 	mrpt::system::sleep(10);
	} while (!thrPar.quit);

	// Check error condition:
	if (thrPar.quit) return;


	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D  win3D("Kinect 3D view",800,600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(2.5,0,0);

#if !defined(VIEW_AS_OCTOMAP)
	mrpt::opengl::CPointCloudColouredPtr gl_points = mrpt::opengl::CPointCloudColoured::Create();
	gl_points->setPointSize(2.5);
#else
	mrpt::opengl::COctoMapVoxelsPtr gl_voxels = mrpt::opengl::COctoMapVoxels::Create();
#endif

	const double aspect_ratio =  480.0 / 640.0; // kinect.getRowCount() / double( kinect.getColCount() );

	opengl::COpenGLViewportPtr viewRange, viewInt; // Extra viewports for the RGB & D images.
	{
		mrpt::opengl::COpenGLScenePtr &scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
#if !defined(VIEW_AS_OCTOMAP)
		scene->insert( gl_points );
#else
		scene->insert( gl_voxels );
#endif
		scene->insert( mrpt::opengl::CGridPlaneXY::Create() );
		scene->insert( mrpt::opengl::stock_objects::CornerXYZ() );

		const int VW_WIDTH = 250;	// Size of the viewport into the window, in pixel units.
		const int VW_HEIGHT = aspect_ratio*VW_WIDTH;
		const int VW_GAP = 30;

		// Create the Opengl objects for the planar images, as textured planes, each in a separate viewport:
		win3D.addTextMessage(30,-25-1*(VW_GAP+VW_HEIGHT),"Range data",TColorf(1,1,1), 1, MRPT_GLUT_BITMAP_HELVETICA_12 );
		viewRange = scene->createViewport("view2d_range");
		viewRange->setViewportPosition(5,-10-1*(VW_GAP+VW_HEIGHT), VW_WIDTH,VW_HEIGHT);

		win3D.addTextMessage(30, -25-2*(VW_GAP+VW_HEIGHT),"Intensity data",TColorf(1,1,1), 2, MRPT_GLUT_BITMAP_HELVETICA_12 );
		viewInt = scene->createViewport("view2d_int");
		viewInt->setViewportPosition(5, -10-2*(VW_GAP+VW_HEIGHT), VW_WIDTH,VW_HEIGHT );

		win3D.unlockAccess3DScene();
		win3D.repaint();
	}


	CObservation3DRangeScanPtr  last_obs;
	CObservationIMUPtr          last_obs_imu;

	while (win3D.isOpen() && !thrPar.quit)
	{
		CObservation3DRangeScanPtr possiblyNewObs = thrPar.new_obs.get();
		if (possiblyNewObs && possiblyNewObs->timestamp!=INVALID_TIMESTAMP &&
			(!last_obs  || possiblyNewObs->timestamp!=last_obs->timestamp ) )
		{
			// It IS a new observation:
			last_obs     = possiblyNewObs;
			last_obs_imu = thrPar.new_obs_imu.get();

			// Update visualization ---------------------------------------
			bool do_refresh = false;

			// Show ranges as 2D:
			if (last_obs->hasRangeImage )
			{
				mrpt::utils::CImage  img;

				// Normalize the image (the matrix is static to avoid
				// reallocating it on every iteration):
				static CMatrixFloat range2D;
				range2D = last_obs->rangeImage * (1.0 / 5.0);  // ~1/kinect.getMaxRange()

				img.setFromMatrix(range2D);

				win3D.get3DSceneAndLock();
					viewRange->setImageView_fast(img);
				win3D.unlockAccess3DScene();
				do_refresh=true;
			}

			// Show intensity image:
			if (last_obs->hasIntensityImage )
			{
				win3D.get3DSceneAndLock();
					viewInt->setImageView(last_obs->intensityImage); // This is not "_fast" since the intensity image is used below in the coloured point cloud.
				win3D.unlockAccess3DScene();
				do_refresh=true;
			}

			// Show 3D points:
			if (last_obs->hasPoints3D )
			{
#if !defined(VIEW_AS_OCTOMAP)
				// For alternative ways to generate the 3D point cloud, read:
				// http://www.mrpt.org/Generating_3D_point_clouds_from_RGB_D_observations
				CColouredPointsMap pntsMap;
				pntsMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage;
				pntsMap.loadFromRangeScan(*last_obs);

				win3D.get3DSceneAndLock();
					gl_points->loadFromPointsMap(&pntsMap);
				win3D.unlockAccess3DScene();
#else
				CColouredOctoMap  octoMap(0.10);
				octoMap.setVoxelColourMethod( CColouredOctoMap::INTEGRATE );
				octoMap.insertObservationPtr( last_obs );

				win3D.get3DSceneAndLock();
					octoMap.getAsOctoMapVoxels( *gl_voxels );
					gl_voxels->showVoxels(VOXEL_SET_FREESPACE, false);
				win3D.unlockAccess3DScene();
#endif
				do_refresh=true;
			}

			// Estimated grabbing rate:
			win3D.get3DSceneAndLock();
				win3D.addTextMessage(-100,-20, format("%.02f Hz", thrPar.Hz ), TColorf(1,1,1), 100, MRPT_GLUT_BITMAP_HELVETICA_18 );
			win3D.unlockAccess3DScene();

			// Do we have accelerometer data?
			if (last_obs_imu && last_obs_imu->dataIsPresent[IMU_X_ACC])
			{
				win3D.get3DSceneAndLock();
					win3D.addTextMessage(10,60,
						format("Acc: x=%.02f y=%.02f z=%.02f", last_obs_imu->rawMeasurements[IMU_X_ACC], last_obs_imu->rawMeasurements[IMU_Y_ACC], last_obs_imu->rawMeasurements[IMU_Z_ACC] ),
						TColorf(0,0,1), "mono", 10, mrpt::opengl::FILL, 102);
				win3D.unlockAccess3DScene();
				do_refresh=true;
			}

			// Force opengl repaint:
			if (do_refresh) win3D.repaint();

		} // end update visualization:


		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit() && thrPar.pushed_key==0)
		{
			const int key = tolower( win3D.getPushedKey() );

			switch(key)
			{
				// Some of the keys are processed in this thread:
				case 'o':
					win3D.setCameraZoom( win3D.getCameraZoom() * 1.2 );
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom( win3D.getCameraZoom() / 1.2 );
					win3D.repaint();
					break;
				case '9':
					{
						// Save latest image (intensity or IR) to disk:
						static int cnt = 0;
						if (last_obs->hasIntensityImage )
						{
							const std::string s = mrpt::format("kinect_image_%04i.png",cnt++);
							std::cout << "Writing intensity/IR image to disk: " << s << std::endl;
							if (!last_obs->intensityImage.saveToFile(s))
								std::cerr << "(error writing file!)\n";
						}
					}
					break;
				// ...and the rest in the kinect thread:
				default:
					thrPar.pushed_key = key;
					break;
			};
		}

		win3D.get3DSceneAndLock();
		win3D.addTextMessage(10,10,
			format("'o'/'i'-zoom out/in, 'w'-tilt up,'s'-tilt down, mouse: orbit 3D,'c':Switch RGB/IR,'9':Save image,ESC: quit"),
				TColorf(0,0,1), "mono", 10, mrpt::opengl::FILL, 110);
		win3D.addTextMessage(10,35,
			format("Tilt angle: %.01f deg", thrPar.tilt_ang_deg),
				TColorf(0,0,1), "mono", 10, mrpt::opengl::FILL, 111);
		win3D.unlockAccess3DScene();

		mrpt::system::sleep(1);
	}


	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	mrpt::system::joinThread(thHandle);
	cout << "Bye!\n";
}
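
This older (MRPT 1.x era) viewer reads observations with thrPar.new_obs.get(), which points to a mrpt::synch::CThreadSafeVariable-based glue struct rather than the std::atomic_load() pattern of the C++11 examples above. A sketch of the assumed structure, with fields inferred from their usage:

struct TThreadParam
{
	TThreadParam() : quit(false), pushed_key(0), tilt_ang_deg(0), Hz(0) {}

	volatile bool quit;
	volatile int pushed_key;
	volatile double tilt_ang_deg;
	volatile double Hz;

	mrpt::synch::CThreadSafeVariable<CObservation3DRangeScanPtr> new_obs;
	mrpt::synch::CThreadSafeVariable<CObservationIMUPtr> new_obs_imu;
};
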
Example #7
// ------------------------------------------------------
//				Test_KinectOnlineOffline
// ------------------------------------------------------
void Test_KinectOnlineOffline(bool is_online, const string &rawlog_file = string())
{
	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar(
		is_online,
		rawlog_file,
		false // generate_3D_pointcloud_in_this_thread -> Don't, we'll do it in this main thread.
		);

	mrpt::system::TThreadHandle thHandle =
		mrpt::system::createThreadRef(thread_grabbing, thrPar);

	// Wait until data stream starts so we can say for sure the sensor has been initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do {
		CObservation3DRangeScanPtr newObs = thrPar.new_obs.get();
		if (newObs && newObs->timestamp!=INVALID_TIMESTAMP)
				break;
		else 	mrpt::system::sleep(10);
	} while (!thrPar.quit);

	// Check error condition:
	if (thrPar.quit) return;
	cout << "OK! Sensor started to emit observations.\n";

	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D  win3D("Kinect 3D view",800,600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(2.5,0,0);

	mrpt::opengl::CPointCloudColouredPtr gl_points = mrpt::opengl::CPointCloudColoured::Create();
	gl_points->setPointSize(2.5);

	opengl::COpenGLViewportPtr viewInt; // Extra viewports for the RGB images.
	{
		mrpt::opengl::COpenGLScenePtr &scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert( gl_points );
		scene->insert( mrpt::opengl::CGridPlaneXY::Create() );
		scene->insert( mrpt::opengl::stock_objects::CornerXYZ() );

		const double aspect_ratio =  480.0 / 640.0;
		const int VW_WIDTH = 400;	// Size of the viewport into the window, in pixel units.
		const int VW_HEIGHT = aspect_ratio*VW_WIDTH;

		// Create an extra opengl viewport for the RGB image:
		viewInt = scene->createViewport("view2d_int");
		viewInt->setViewportPosition(5, 30, VW_WIDTH,VW_HEIGHT );
		win3D.addTextMessage(10, 30+VW_HEIGHT+10,"Intensity data",TColorf(1,1,1), 2, MRPT_GLUT_BITMAP_HELVETICA_12 );

		win3D.addTextMessage(5,5,
			format("'o'/'i'-zoom out/in, ESC: quit"),
				TColorf(0,0,1), 110, MRPT_GLUT_BITMAP_HELVETICA_18 );


		win3D.unlockAccess3DScene();
		win3D.repaint();
	}



	mrpt::system::TTimeStamp  last_obs_tim = INVALID_TIMESTAMP;

	while (win3D.isOpen() && !thrPar.quit)
	{
		CObservation3DRangeScanPtr newObs = thrPar.new_obs.get();
		if (newObs && newObs->timestamp!=INVALID_TIMESTAMP &&
			newObs->timestamp!=last_obs_tim )
		{
			// It IS a new observation:
			last_obs_tim = newObs->timestamp;

			// Update visualization ---------------------------------------

			win3D.get3DSceneAndLock();

			// Estimated grabbing rate:
			win3D.addTextMessage(-350,-13, format("Timestamp: %s", mrpt::system::dateTimeLocalToString(last_obs_tim).c_str()), TColorf(0.6,0.6,0.6),"mono",10,mrpt::opengl::FILL, 100);
			win3D.addTextMessage(-100,-30, format("%.02f Hz", thrPar.Hz ), TColorf(1,1,1),"mono",10,mrpt::opengl::FILL, 101);

			// Show intensity image:
			if (newObs->hasIntensityImage )
			{
				viewInt->setImageView(newObs->intensityImage); // This is not "_fast" since the intensity image may be needed later on.
			}
			win3D.unlockAccess3DScene();

			// -------------------------------------------------------
			//           Create 3D points from RGB+D data
			//
			// There are several methods to do this.
			//  Switch the #if's to select among the options:
			// See also: http://www.mrpt.org/Generating_3D_point_clouds_from_RGB_D_observations
			// -------------------------------------------------------
			if (newObs->hasRangeImage)
			{
				static mrpt::utils::CTimeLogger logger;
				logger.enter("RGBD->3D");

// Pathway: RGB+D --> PCL <PointXYZ> --> XYZ opengl
#if 0
				static pcl::PointCloud<pcl::PointXYZ> cloud;
				logger.enter("RGBD->3D.projectInto");
				newObs->project3DPointsFromDepthImageInto(cloud, false /* without obs.sensorPose */);
				logger.leave("RGBD->3D.projectInto");

				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.load in OpenGL");
					gl_points->loadFromPointsMap(&cloud);
				logger.leave("RGBD->3D.load in OpenGL");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> PCL <PointXYZRGB> --> XYZ+RGB opengl
#if 0
				static pcl::PointCloud<pcl::PointXYZRGB> cloud;
				logger.enter("RGBD->3D.projectInto");
				newObs->project3DPointsFromDepthImageInto(cloud, false /* without obs.sensorPose */);
				logger.leave("RGBD->3D.projectInto");

				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.load in OpenGL");
					gl_points->loadFromPointsMap(&cloud);
				logger.leave("RGBD->3D.load in OpenGL");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> XYZ+RGB opengl
#if 1
				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.projectInto");
					newObs->project3DPointsFromDepthImageInto(*gl_points, false /* without obs.sensorPose */);
				logger.leave("RGBD->3D.projectInto");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> XYZ+RGB opengl (With a 6D global pose of the robot)
#if 0
				const CPose3D globalPose(1,2,3,DEG2RAD(10),DEG2RAD(20),DEG2RAD(30));
				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.projectInto");
					newObs->project3DPointsFromDepthImageInto(*gl_points, false /* without obs.sensorPose */, &globalPose);
				logger.leave("RGBD->3D.projectInto");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> internal local XYZ pointcloud --> XYZ+RGB point cloud map --> XYZ+RGB opengl
#if 0
				// Project 3D points:
				if (!newObs->hasPoints3D)
				{
				logger.enter("RGBD->3D.projectInto");
					newObs->project3DPointsFromDepthImage();
				logger.leave("RGBD->3D.projectInto");
				}

				CColouredPointsMap pntsMap;
				pntsMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage;
				pntsMap.loadFromRangeScan(*newObs);

				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.load in OpenGL");
					gl_points->loadFromPointsMap(&pntsMap);
				logger.leave("RGBD->3D.load in OpenGL");
				win3D.unlockAccess3DScene();
#endif

				logger.leave("RGBD->3D");
			}

			win3D.repaint();
		} // end update visualization:


		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit())
		{
			const int key = tolower( win3D.getPushedKey() );

			switch(key)
			{
				// Some of the keys are processed in this thread:
				case 'o':
					win3D.setCameraZoom( win3D.getCameraZoom() * 1.2 );
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom( win3D.getCameraZoom() / 1.2 );
					win3D.repaint();
					break;
				case 27: // ESC
					thrPar.quit = true;
					break;
				default:
					break;
			};
		}

		mrpt::system::sleep(1);
	}


	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	mrpt::system::joinThread(thHandle);
	cout << "Bye!\n";
}
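
Of the four #if pathways above, only the third (projecting the RGB+D frame straight into the opengl point cloud) is enabled; it skips the intermediate PCL cloud or points map entirely. Its core isolated into a small helper, using the same MRPT 1.x calls as the sample (treat it as a sketch, not the sample's own API):

void updateCloudFromRGBD(
	mrpt::gui::CDisplayWindow3D& win,
	mrpt::opengl::CPointCloudColouredPtr& gl_points,
	CObservation3DRangeScanPtr obs)
{
	win.get3DSceneAndLock();
	// Project directly into the opengl point cloud, leaving the
	// observation's sensorPose out of the transformation:
	obs->project3DPointsFromDepthImageInto(*gl_points, false);
	win.unlockAccess3DScene();
	win.repaint();
}
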
Example #8
int main ( int argc, char** argv )
{
	openni::Status rc = openni::STATUS_OK;
	openni::Device		device;
	openni::VideoMode	options;
	openni::VideoStream depth, rgb;

	//									Device is opened
	//=======================================================================================
	const char* deviceURI = openni::ANY_DEVICE;
	if (argc > 1)
		deviceURI = argv[1];

	rc = openni::OpenNI::initialize();
	if (rc != openni::STATUS_OK) { printf("After initialization:\n %s\n", openni::OpenNI::getExtendedError()); }
	rc = device.open(deviceURI);
	
	//cout << endl << "Do we have IR sensor? " << device.hasSensor(openni::SENSOR_IR);
	//cout << endl << "Do we have RGB sensor? " << device.hasSensor(openni::SENSOR_COLOR);
	//cout << endl << "Do we have Depth sensor? " << device.hasSensor(openni::SENSOR_DEPTH);

	if (rc != openni::STATUS_OK)
	{
		printf("Device open failed:\n%s\n", openni::OpenNI::getExtendedError());
		openni::OpenNI::shutdown();
		return 1;
	}

	//							Create RGB and Depth channels
	//========================================================================================
	rc = depth.create(device, openni::SENSOR_DEPTH);
	rc = rgb.create(device, openni::SENSOR_COLOR);


	//							Configure video properties
	//========================================================================================
	int width = 640, height = 480;

	rc = device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
	//rc = device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_OFF);
	
	openni::VideoMode vm;

	options = rgb.getVideoMode();
	printf("\nInitial resolution RGB (%d, %d)", options.getResolutionX(), options.getResolutionY());
	options.setResolution(width,height);
	rc = rgb.setVideoMode(options);
	rc = rgb.setMirroringEnabled(false);

	options = depth.getVideoMode();
	printf("\nInitial resolution Depth(%d, %d)", options.getResolutionX(), options.getResolutionY());
	options.setResolution(width,height);
	rc = depth.setVideoMode(options);
	rc = depth.setMirroringEnabled(false);

	options = depth.getVideoMode();
	printf("\nNew resolution (%d, %d) \n", options.getResolutionX(), options.getResolutionY());


	rc = depth.start();
	if (rc != openni::STATUS_OK)
	{
		printf("Couldn't start depth stream:\n%s\n", openni::OpenNI::getExtendedError());
		depth.destroy();
	}

	rc = rgb.start();
	if (rc != openni::STATUS_OK)
	{
		printf("Couldn't start rgb stream:\n%s\n", openni::OpenNI::getExtendedError());
		rgb.destroy();
	}

	if (rc != openni::STATUS_OK)
	{
		openni::OpenNI::shutdown();
		return 3;
	}

	if (!depth.isValid() || !rgb.isValid())
	{
		printf("No valid streams. Exiting\n");
		openni::OpenNI::shutdown();
		return 2;
	}

	//						Uncomment this to see the video modes available
	//========================================================================================
	////Depth modes
	//for(unsigned int i = 0; i<depth.getSensorInfo().getSupportedVideoModes().getSize(); i++)
	//{
	//	vm = depth.getSensorInfo().getSupportedVideoModes()[i];
	//	printf("\n Depth mode %d: %d x %d, fps - %d Hz, pixel format - ",i, vm.getResolutionX(), vm.getResolutionY(), vm.getFps());
	//	cout << vm.getPixelFormat();
	//	if ((vm.getResolutionX() == width)&&(vm.getPixelFormat() == openni::PIXEL_FORMAT_DEPTH_1_MM))
	//		rc = depth.setVideoMode(vm);
	//}
	
	////Colour modes
	//for(unsigned int i = 0; i<rgb.getSensorInfo().getSupportedVideoModes().getSize(); i++)
	//{
	//	vm = rgb.getSensorInfo().getSupportedVideoModes()[i];
	//	printf("\n RGB mode %d: %d x %d, fps - %d Hz, pixel format - ",i, vm.getResolutionX(), vm.getResolutionY(), vm.getFps());
	//	cout << vm.getPixelFormat();
	//}

    //					Uncomment this to allow for closer points detection
	//========================================================================================
	//bool CloseRange;
	//depth.setProperty(XN_STREAM_PROPERTY_CLOSE_RANGE, 1);
	//depth.getProperty(XN_STREAM_PROPERTY_CLOSE_RANGE, &CloseRange);
	//printf("\nClose range: %s", CloseRange?"On":"Off");	
	

	//										Create scene
	//========================================================================================
	gui::CDisplayWindow3D window;
	opengl::COpenGLScenePtr	scene;
	mrpt::global_settings::OCTREE_RENDER_MAX_POINTS_PER_NODE = 1000000;
	window.setWindowTitle("RGB-D camera frame");
	window.resize(800,600);
	window.setPos(500,50);
	window.setCameraZoom(5);
	window.setCameraAzimuthDeg(180);
	window.setCameraElevationDeg(5);
	scene = window.get3DSceneAndLock();

	opengl::CPointCloudColouredPtr kinectp = opengl::CPointCloudColoured::Create();
	kinectp->enablePointSmooth(true);
	kinectp->setPointSize(2);
	scene->insert( kinectp );

	opengl::CSetOfObjectsPtr reference = opengl::stock_objects::CornerXYZ();
	reference->setScale(0.4);
	scene->insert( reference );

	window.unlockAccess3DScene();
	window.addTextMessage(5, 5, "Press any key to exit");
	window.repaint();

	//							Grab frames continuously and show
	//========================================================================================
	CColouredPointsMap points;
	openni::VideoFrameRef framed, framergb;
	
	while (!window.keyHit())	// Press any key to exit
	{
		points.clear();
		depth.readFrame(&framed);
		rgb.readFrame(&framergb);

		if ((framed.getWidth() != framergb.getWidth()) || (framed.getHeight() != framergb.getHeight()))
			cout << endl << "The depth and RGB frames don't have the same size.";

		else
		{
			//Read one frame
			const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)framed.getData();
			const openni::RGB888Pixel* pRgbRow = (const openni::RGB888Pixel*)framergb.getData();
			int rowSize = framed.getStrideInBytes() / sizeof(openni::DepthPixel);

			// 525 px: nominal Kinect focal length at 640x480, scaled by the
			// actual width (640/width must be computed in floating point):
			float x, y, z, inv_f = (640.0f / width) / 525.0f;

			for (int yc = 0; yc < framed.getHeight(); ++yc)
			{
				const openni::DepthPixel* pDepth = pDepthRow;
				const openni::RGB888Pixel* pRgb = pRgbRow;
				for (int xc = 0; xc < framed.getWidth(); ++xc, ++pDepth, ++pRgb)
				{
					z = 0.001f * (*pDepth);  // depth arrives in millimeters
					x = -(xc - 0.5f * (width - 1)) * z * inv_f;
					y = -(yc - 0.5f * (height - 1)) * z * inv_f;
					// Insert as (z,x,y): MRPT maps are X-forward. RGB is
					// normalized to [0,1] (0.0039 ~= 1/255):
					points.insertPoint(
						z, x, y, 0.0039f * pRgb->r, 0.0039f * pRgb->g,
						0.0039f * pRgb->b);
				}
				pDepthRow += rowSize;
				pRgbRow += rowSize;
			}
		}

		scene = window.get3DSceneAndLock();
		kinectp->loadFromPointsMap<mrpt::maps::CColouredPointsMap>(&points);
		window.unlockAccess3DScene();
		window.repaint();
		system::sleep(5);  // Throttle only after releasing the scene lock.
	}

	depth.destroy();
	rgb.destroy();
	openni::OpenNI::shutdown();

	return 0;
}
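
For reference, the loop above is the standard pinhole back-projection, assuming a nominal Kinect focal length of 525 px at 640x480 and the principal point at the image center; points are inserted as (z, x, y) because MRPT maps use X-forward. The same math as a standalone sketch:

struct Point3f { float x, y, z; };

Point3f backProject(int xc, int yc, float z_m, int width, int height)
{
	const float f = 525.0f * (width / 640.0f);  // focal length scales with width
	const float cx = 0.5f * (width - 1);
	const float cy = 0.5f * (height - 1);
	return {z_m,                    // optical axis -> MRPT's +X (forward)
			-(xc - cx) * z_m / f,   // image x -> -Y (left)
			-(yc - cy) * z_m / f};  // image y -> -Z (up)
}
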