Example #1
void rgb_cb(freenect_device *dev, void *img_data, uint32_t timestamp)
{
	CKinect *obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));
	const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);

	// Update of the timestamps at the end:
	CObservation3DRangeScan &obs = obj->internal_latest_obs();
	mrpt::synch::CCriticalSectionLocker lock( &obj->internal_latest_obs_cs() );

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_rgb loadFromMemoryBuffer");
#endif

	obs.hasIntensityImage = true;
	if (obj->getVideoChannel()==CKinect::VIDEO_CHANNEL_RGB)
	{
		// Color image: We asked for Bayer data, so we can decode it ourselves here
		//  and avoid having to reorder Green<->Red channels, as would be needed with
		//  the RGB image from freenect.
		obs.intensityImageChannel = mrpt::slam::CObservation3DRangeScan::CH_VISIBLE;
		obs.intensityImage.resize(frMode.width, frMode.height, CH_RGB, true /* origin=top-left */ );

#if MRPT_HAS_OPENCV
#	if MRPT_OPENCV_VERSION_NUM<0x200
		// Version for VERY OLD OpenCV versions:
		IplImage *src_img_bayer = cvCreateImageHeader(cvSize(frMode.width,frMode.height),8,1);
		src_img_bayer->imageDataOrigin = reinterpret_cast<char*>(img_data);
		src_img_bayer->imageData = src_img_bayer->imageDataOrigin;
		src_img_bayer->widthStep = frMode.width;

		IplImage *dst_img_RGB = obs.intensityImage.getAs<IplImage>();

		// Decode Bayer image:
		cvCvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);

#	else
		// Version for modern OpenCV:
		const cv::Mat src_img_bayer(frMode.height, frMode.width, CV_8UC1, img_data, frMode.width);

		cv::Mat dst_img_RGB = cv::cvarrToMat(obs.intensityImage.getAs<IplImage>(), false /* don't copy buffers */);

		// Decode Bayer image:
		cv::cvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
#	endif
#else
	THROW_EXCEPTION("Need building with OpenCV!")
#endif

	}
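The rgb_cb callback above only runs once it has been registered with libfreenect and the event loop is being pumped. Below is a minimal sketch of that wiring using the standard libfreenect C API; the helper name open_video_stream and the running flag are illustrative assumptions, and the actual MRPT driver performs this setup internally when the sensor is opened.

// Sketch only: error checking omitted; open_video_stream() is a hypothetical helper.
void open_video_stream(CKinect& kinect, freenect_context*& f_ctx, freenect_device*& f_dev)
{
	freenect_init(&f_ctx, nullptr);
	freenect_open_device(f_ctx, &f_dev, 0 /* device index */);

	// Make the CKinect object reachable from the C callback via freenect_get_user():
	freenect_set_user(f_dev, &kinect);

	// Ask for raw Bayer frames, as assumed by the decoding code in rgb_cb():
	freenect_set_video_mode(
		f_dev,
		freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_BAYER));
	freenect_set_video_callback(f_dev, rgb_cb);
	freenect_start_video(f_dev);
}

// The grabbing loop then lets libfreenect invoke rgb_cb():
//   while (running) freenect_process_events(f_ctx);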
Example #2
// ========  GLOBAL CALLBACK FUNCTIONS ========
void depth_cb(freenect_device* dev, void* v_depth, uint32_t timestamp)
{
	const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);

	uint16_t* depth = reinterpret_cast<uint16_t*>(v_depth);

	CKinect* obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));

	// Update of the timestamps at the end:
	std::lock_guard<std::mutex> lock(obj->internal_latest_obs_cs());
	CObservation3DRangeScan& obs = obj->internal_latest_obs();

	obs.hasRangeImage = true;
	obs.range_is_depth = true;

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_cb alloc");
#endif

	// This method will try to exploit memory pooling if possible:
	obs.rangeImage_setSize(frMode.height, frMode.width);

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.leave("depth_cb alloc");
#endif

	const CKinect::TDepth2RangeArray& r2m = obj->getRawDepth2RangeConversion();
	for (int r = 0; r < frMode.height; r++)
		for (int c = 0; c < frMode.width; c++)
		{
			// For now, quickly save the depth as it comes from the sensor;
			//  it'll be transformed later on in getNextObservation()
			const uint16_t v = *depth++;
			obs.rangeImage.coeffRef(r, c) = r2m[v & KINECT_RANGES_TABLE_MASK];
		}
	obj->internal_tim_latest_depth() = timestamp;
}
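The per-pixel work in depth_cb is just a table lookup: r2m is a precomputed raw-depth-to-metres table exposed through getRawDepth2RangeConversion(). Here is a minimal sketch of how such a table could be precomputed, using the widely circulated Kinect v1 disparity-to-metres approximation; the constants, the table length, and the function name are illustrative assumptions, not necessarily what MRPT builds internally.

#include <array>
#include <cstddef>

constexpr std::size_t kRangesTableLen = 2048;  // 11-bit raw depth readings (assumption)

std::array<float, kRangesTableLen> build_depth2range_table()
{
	std::array<float, kRangesTableLen> r2m{};
	for (std::size_t i = 0; i < kRangesTableLen; i++)
	{
		const float den = i * (-0.0030711016f) + 3.3309495161f;
		// Readings past the sensor's useful range (den <= 0) are marked invalid (0):
		r2m[i] = den > 0 ? 1.0f / den : 0.0f;
	}
	// Raw value 0 and the saturated maximum are also treated as "no valid range":
	r2m[0] = 0.0f;
	r2m[kRangesTableLen - 1] = 0.0f;
	return r2m;
}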
Example #3
void thread_grabbing(TThreadParam& p)
{
	try
	{
		CKinect kinect;

		// Set params:
		// kinect.enableGrab3DPoints(true);
		// kinect.enablePreviewRGB(true);
		//...
		const std::string cfgFile = "kinect_calib.cfg";
		if (mrpt::system::fileExists(cfgFile))
		{
			cout << "Loading calibration from: " << cfgFile << endl;
			kinect.loadConfig(mrpt::config::CConfigFile(cfgFile), "KINECT");
		}
		else
			cerr << "Warning: Calibration file [" << cfgFile
				 << "] not found -> Using default params. "
					"This may cause inaccurate 3D point clouds.\n Checkout the "
					"kinect-stereo-calib application:\n "
					"http://www.google.com/"
					"search?q=kinect-stereo-calib%20mrpt&btnI \n";

		// Open:
		cout << "Calling CKinect::initialize()...";
		kinect.initialize();
		cout << "OK\n";

		CTicTac tictac;
		int nImgs = 0;
		bool there_is_obs = true, hard_error = false;

		while (!hard_error && !p.quit)
		{
			// Grab new observation from the camera:
			CObservation3DRangeScan::Ptr obs = mrpt::make_aligned_shared<
				CObservation3DRangeScan>();  // Smart pointer to observation
			kinect.getNextObservation(*obs, there_is_obs, hard_error);

			if (!hard_error && there_is_obs)
			{
				std::atomic_store(&p.new_obs, obs);
			}

			if (p.pushed_key != 0)
			{
				switch (p.pushed_key)
				{
					case 's':
						p.tilt_ang_deg -= 2;
						if (p.tilt_ang_deg < -30) p.tilt_ang_deg = -30;
						kinect.setTiltAngleDegrees(p.tilt_ang_deg);
						break;
					case 'w':
						p.tilt_ang_deg += 2;
						if (p.tilt_ang_deg > 30) p.tilt_ang_deg = 30;
						kinect.setTiltAngleDegrees(p.tilt_ang_deg);
						break;
					case 27:
						p.quit = true;
						break;
				}

				// Clear pushed key flag:
				p.pushed_key = 0;
			}

			nImgs++;
			if (nImgs > 10)
			{
				p.Hz = nImgs / tictac.Tac();
				nImgs = 0;
				tictac.Tic();
			}
		}
	}
	catch (std::exception& e)
	{
		cout << "Exception in Kinect thread: " << e.what() << endl;
		p.quit = true;
	}
}
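This grabbing loop is designed to run in its own thread and communicate with the main (GUI) thread only through the shared TThreadParam structure. Below is a minimal sketch of how it could be driven from main(); the field types of TThreadParam are assumptions inferred from how they are used above, not the exact definition from the MRPT sample.

#include <atomic>
#include <chrono>
#include <memory>
#include <thread>

#include <mrpt/obs/CObservation3DRangeScan.h>
#include <mrpt/system/os.h>

using mrpt::obs::CObservation3DRangeScan;

struct TThreadParam
{
	std::atomic_bool quit{false};
	std::atomic_int pushed_key{0};
	std::atomic_int tilt_ang_deg{0};
	std::atomic<double> Hz{0};
	CObservation3DRangeScan::Ptr new_obs;  // written by the grabber via std::atomic_store()
};

int main()
{
	TThreadParam thrPar;
	std::thread grabber(thread_grabbing, std::ref(thrPar));

	while (!thrPar.quit)
	{
		// Fetch the most recent observation published by the grabbing thread:
		CObservation3DRangeScan::Ptr obs = std::atomic_load(&thrPar.new_obs);
		if (obs)
		{
			// ... render / process the latest frame here ...
		}

		// Forward keystrokes ('w'/'s' tilt, ESC quits) to the grabbing thread:
		if (mrpt::system::os::kbhit()) thrPar.pushed_key = mrpt::system::os::getch();

		std::this_thread::sleep_for(std::chrono::milliseconds(10));
	}
	grabber.join();
	return 0;
}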
Example #4
void thread_grabbing(TThreadParam& p)
{
	try
	{
		CKinect kinect;

		// Set params:
		// kinect.enableGrab3DPoints(true);
		// kinect.enablePreviewRGB(true);
		//...
		const std::string cfgFile = "kinect_calib.cfg";
		if (mrpt::system::fileExists(cfgFile))
		{
			cout << "Loading calibration from: " << cfgFile << endl;
			kinect.loadConfig(mrpt::config::CConfigFile(cfgFile), "KINECT");
		}
		else
			cerr << "Warning: Calibration file [" << cfgFile
				 << "] not found -> Using default params.\n";

		// Open:
		cout << "Calling CKinect::initialize()...";
		kinect.initialize();
		cout << "OK\n";

		mrpt::system::CTicTac tictac;
		int nImgs = 0;
		bool there_is_obs = true, hard_error = false;

		while (!hard_error && !p.quit)
		{
			// Grab new observation from the camera:
			CObservation3DRangeScan::Ptr obs = mrpt::make_aligned_shared<
				CObservation3DRangeScan>();  // Smart pointers to observations
			CObservationIMU::Ptr obs_imu =
				mrpt::make_aligned_shared<CObservationIMU>();

			kinect.getNextObservation(*obs, *obs_imu, there_is_obs, hard_error);

			if (!hard_error && there_is_obs)
			{
				std::atomic_store(&p.new_obs, obs);
				std::atomic_store(&p.new_obs_imu, obs_imu);
			}

			if (p.pushed_key != 0)
			{
				switch (p.pushed_key)
				{
					case 's':
						p.tilt_ang_deg -= 1;
						if (p.tilt_ang_deg < -31) p.tilt_ang_deg = -31;
						kinect.setTiltAngleDegrees(p.tilt_ang_deg);
						break;
					case 'w':
						p.tilt_ang_deg += 1;
						if (p.tilt_ang_deg > 31) p.tilt_ang_deg = 31;
						kinect.setTiltAngleDegrees(p.tilt_ang_deg);
						break;
					case 'c':
						// Switch video input:
						kinect.setVideoChannel(
							kinect.getVideoChannel() ==
									CKinect::VIDEO_CHANNEL_RGB
								? CKinect::VIDEO_CHANNEL_IR
								: CKinect::VIDEO_CHANNEL_RGB);
						break;
					case 27:
						p.quit = true;
						break;
				}

				// Clear pushed key flag:
				p.pushed_key = 0;
			}

			nImgs++;
			if (nImgs > 10)
			{
				p.Hz = nImgs / tictac.Tac();
				nImgs = 0;
				tictac.Tic();
			}
		}
	}
	catch (const std::exception& e)
	{
		cout << "Exception in Kinect thread: " << mrpt::exception_to_str(e)
			 << endl;
		p.quit = true;
	}
}
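Compared with the previous example, this variant also publishes the accelerometer readings through p.new_obs_imu. A minimal sketch of how the consumer side could read them back, assuming the TThreadParam sketch above extended with a new_obs_imu shared pointer; mrpt::obs::CObservationIMU stores its values in rawMeasurements, indexed by the TIMUDataIndex enumerators, with dataIsPresent flagging which entries are valid.

// Sketch only: thrPar and its new_obs_imu member follow the assumptions above.
CObservationIMU::Ptr imu = std::atomic_load(&thrPar.new_obs_imu);
if (imu && imu->dataIsPresent[mrpt::obs::IMU_Z_ACC])
{
	const double ax = imu->rawMeasurements[mrpt::obs::IMU_X_ACC];
	const double ay = imu->rawMeasurements[mrpt::obs::IMU_Y_ACC];
	const double az = imu->rawMeasurements[mrpt::obs::IMU_Z_ACC];
	std::cout << "Kinect accelerometer [m/s^2]: " << ax << " " << ay << " " << az << "\n";
}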