Code example #1
File: CKinect.cpp  Project: KongWeibin/mrpt
void rgb_cb(freenect_device *dev, void *img_data, uint32_t timestamp)
{
	CKinect *obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));
	const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);

	// Update of the timestamps at the end:
	CObservation3DRangeScan &obs = obj->internal_latest_obs();
	mrpt::synch::CCriticalSectionLocker lock( &obj->internal_latest_obs_cs() );

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_rgb loadFromMemoryBuffer");
#endif

	obs.hasIntensityImage = true;
	if (obj->getVideoChannel()==CKinect::VIDEO_CHANNEL_RGB)
	{
		// Color image: we asked for Bayer data, so we can decode it ourselves here
		// and avoid having to reorder Green<->Red channels, as would be needed with
		// the RGB image from freenect.
		obs.intensityImageChannel = mrpt::slam::CObservation3DRangeScan::CH_VISIBLE;
		obs.intensityImage.resize(frMode.width, frMode.height, CH_RGB, true /* origin=top-left */);

#if MRPT_HAS_OPENCV
#	if MRPT_OPENCV_VERSION_NUM<0x200
		  // Version for VERY OLD OpenCV versions:
		  IplImage *src_img_bayer = cvCreateImageHeader(cvSize(frMode.width,frMode.height),8,1);
		  src_img_bayer->imageDataOrigin = reinterpret_cast<char*>(img_data);
		  src_img_bayer->imageData = src_img_bayer->imageDataOrigin;
		  src_img_bayer->widthStep = frMode.width;

		  IplImage *dst_img_RGB = obs.intensityImage.getAs<IplImage>();

          // Decode Bayer image:
		  cvCvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);

#	else
		// Version for modern OpenCV:
		const cv::Mat src_img_bayer(frMode.height, frMode.width, CV_8UC1, img_data, frMode.width);

		cv::Mat dst_img_RGB = cv::cvarrToMat(obs.intensityImage.getAs<IplImage>(), false /* don't copy buffers */);

		// Decode Bayer image:
		cv::cvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
#	endif
#else
	THROW_EXCEPTION("Need building with OpenCV!");
#endif

	}
	// The non-RGB video channel branch and the timestamp update that follow are
	// omitted in this excerpt.
}
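Both callbacks retrieve the grabber object through freenect_get_user(), so they only do something useful once the device has been configured and the callbacks registered with libfreenect. A minimal sketch of that wiring with the plain libfreenect C API is shown below; it is only an illustration under assumptions (run_grabber, my_grabber and keep_running are hypothetical names, and the chosen video/depth modes are examples), not the actual setup code from CKinect.cpp.

#include <libfreenect.h>  // header path may vary by install, e.g. <libfreenect/libfreenect.h>

// rgb_cb() and depth_cb() are the callbacks shown in the excerpts on this page.
static volatile bool keep_running = true;

int run_grabber(void* my_grabber)  // my_grabber: whatever object the callbacks expect
{
	freenect_context* f_ctx = nullptr;
	freenect_device*  f_dev = nullptr;

	if (freenect_init(&f_ctx, nullptr) < 0) return -1;
	if (freenect_open_device(f_ctx, &f_dev, 0 /* device index */) < 0) return -1;

	// The stored pointer is what freenect_get_user(dev) returns inside the callbacks:
	freenect_set_user(f_dev, my_grabber);
	freenect_set_video_callback(f_dev, rgb_cb);
	freenect_set_depth_callback(f_dev, depth_cb);

	// Bayer video (decoded in rgb_cb) and raw depth (converted in depth_cb):
	freenect_set_video_mode(f_dev,
		freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_BAYER));
	freenect_set_depth_mode(f_dev,
		freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));

	freenect_start_video(f_dev);
	freenect_start_depth(f_dev);

	// Pump libfreenect events so the callbacks actually fire:
	while (keep_running && freenect_process_events(f_ctx) >= 0) {}

	freenect_stop_depth(f_dev);
	freenect_stop_video(f_dev);
	freenect_close_device(f_dev);
	freenect_shutdown(f_ctx);
	return 0;
}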
Code example #2
File: CKinect.cpp  Project: bergercookie/mrpt
// ========  GLOBAL CALLBACK FUNCTIONS ========
void depth_cb(freenect_device* dev, void* v_depth, uint32_t timestamp)
{
	const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);

	uint16_t* depth = reinterpret_cast<uint16_t*>(v_depth);

	CKinect* obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));

	// Update of the timestamps at the end:
	std::lock_guard<std::mutex> lock(obj->internal_latest_obs_cs());
	CObservation3DRangeScan& obs = obj->internal_latest_obs();

	obs.hasRangeImage = true;
	obs.range_is_depth = true;

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_cb alloc");
#endif

	// This method will try to exploit memory pooling if possible:
	obs.rangeImage_setSize(frMode.height, frMode.width);

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.leave("depth_cb alloc");
#endif

	const CKinect::TDepth2RangeArray& r2m = obj->getRawDepth2RangeConversion();
	for (int r = 0; r < frMode.height; r++)
		for (int c = 0; c < frMode.width; c++)
		{
			// For now, quickly save the depth as it comes from the sensor;
			// it'll be transformed later on in getNextObservation().
			const uint16_t v = *depth++;
			obs.rangeImage.coeffRef(r, c) = r2m[v & KINECT_RANGES_TABLE_MASK];
		}
	obj->internal_tim_latest_depth() = timestamp;
}
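In depth_cb(), each raw sensor reading becomes a range in meters through a single lookup into the precomputed r2m table, after masking the raw value to the table size. As a reference, a minimal sketch of how such a raw-depth-to-range table could be precomputed is shown below; the table length, the mask, and the disparity-to-meters fit (a widely circulated Kinect v1 approximation) are assumptions made for illustration, not values taken from MRPT's CKinect.cpp.

#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical 11-bit table: 2048 entries addressed with mask 0x07FF.
constexpr std::size_t RANGES_TABLE_LEN  = 2048;
constexpr uint16_t    RANGES_TABLE_MASK = 0x07FF;

using Depth2RangeArray = std::array<float, RANGES_TABLE_LEN>;

Depth2RangeArray build_depth2range_table()
{
	Depth2RangeArray r2m{};
	for (std::size_t raw = 0; raw < RANGES_TABLE_LEN; raw++)
	{
		// One common disparity-to-meters approximation for the Kinect v1;
		// saturated/invalid codes (e.g. 2047) fall into the non-positive
		// denominator branch and map to 0 ("no return").
		const float den = raw * -0.0030711016f + 3.3309495161f;
		r2m[raw] = (den > 0.0f) ? (1.0f / den) : 0.0f;
	}
	return r2m;
}

// Per-pixel usage then mirrors the callback above:
//   range_in_meters = r2m[raw_depth & RANGES_TABLE_MASK];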