// libfreenect callback invoked for each incoming video frame.
// Writes the decoded image into the driver's "latest observation" buffer,
// holding its critical section for the whole update.
// NOTE(review): this excerpt ends right after the RGB branch; the IR branch
// and the timestamp bookkeeping are outside the visible chunk.
void rgb_cb(freenect_device *dev, void *img_data, uint32_t timestamp)
{
	// Recover the owning CKinect object stashed in the freenect user pointer:
	CKinect *obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));

	// Frame geometry (width/height) of the currently selected video mode:
	const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);

	// Update of the timestamps at the end:
	CObservation3DRangeScan &obs = obj->internal_latest_obs();
	// Lock the shared observation for the remainder of this callback:
	mrpt::synch::CCriticalSectionLocker lock( &obj->internal_latest_obs_cs() );

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_rgb loadFromMemoryBuffer");
#endif

	obs.hasIntensityImage = true;
	if (obj->getVideoChannel()==CKinect::VIDEO_CHANNEL_RGB)
	{
		// Color image: We asked for Bayer data, so we can decode it ourselves here
		// and avoid having to reorder Green<->Red channels, as would be needed with
		// the RGB image from freenect.
		obs.intensityImageChannel = mrpt::slam::CObservation3DRangeScan::CH_VISIBLE;
		obs.intensityImage.resize(frMode.width, frMode.height, CH_RGB, true /* origin=top-left */ );

#if MRPT_HAS_OPENCV
#	if MRPT_OPENCV_VERSION_NUM<0x200
		// Version for VERY OLD OpenCV versions:
		// Wrap the raw Bayer buffer in an IplImage header (no copy):
		IplImage *src_img_bayer = cvCreateImageHeader(cvSize(frMode.width,frMode.height),8,1);
		src_img_bayer->imageDataOrigin = reinterpret_cast<char*>(img_data);
		src_img_bayer->imageData = src_img_bayer->imageDataOrigin;
		src_img_bayer->widthStep = frMode.width;

		IplImage *dst_img_RGB = obs.intensityImage.getAs<IplImage>();

		// Decode Bayer image:
		cvCvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
#	else
		// Version for modern OpenCV:
		// Wrap the raw Bayer buffer (1 channel, row stride = width) without copying:
		const cv::Mat src_img_bayer( frMode.height, frMode.width, CV_8UC1, img_data, frMode.width );
		cv::Mat dst_img_RGB= cv::cvarrToMat( obs.intensityImage.getAs<IplImage>(), false /* dont copy buffers */ );

		// Decode Bayer image:
		cv::cvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
#	endif
#else
		THROW_EXCEPTION("Need building with OpenCV!")
#endif
	}
// Worker thread: grabs observations from the Kinect, publishes them to the
// consumer through p.new_obs / p.new_obs_imu, services keypresses forwarded
// in p.pushed_key (tilt, channel toggle, quit), and keeps p.Hz updated with
// the measured grabbing rate. Sets p.quit on any fatal error.
void thread_grabbing(TThreadParam& p)
{
	try
	{
		CKinect kinect;

		// Optional sensor features (left disabled, same as before):
		// kinect.enableGrab3DPoints(true);
		// kinect.enablePreviewRGB(true);
		//...

		// Use an external calibration file when one exists; otherwise the
		// sensor keeps its built-in default parameters.
		const std::string cfgFile = "kinect_calib.cfg";
		if (mrpt::system::fileExists(cfgFile))
		{
			cout << "Loading calibration from: " << cfgFile << endl;
			kinect.loadConfig(mrpt::config::CConfigFile(cfgFile), "KINECT");
		}
		else
		{
			cerr << "Warning: Calibration file [" << cfgFile
				 << "] not found -> Using default params.\n";
		}

		// Connect to the device:
		cout << "Calling CKinect::initialize()...";
		kinect.initialize();
		cout << "OK\n";

		mrpt::system::CTicTac rateClock;
		int framesCounted = 0;

		bool obsAvailable = true;
		bool hwFailure = false;

		while (!hwFailure && !p.quit)
		{
			// Fresh observation objects for this grab:
			CObservation3DRangeScan::Ptr newScan =
				mrpt::make_aligned_shared<CObservation3DRangeScan>();
			CObservationIMU::Ptr newIMU =
				mrpt::make_aligned_shared<CObservationIMU>();

			kinect.getNextObservation(*newScan, *newIMU, obsAvailable, hwFailure);

			// Hand the data over to the consumer thread:
			if (!hwFailure && obsAvailable)
			{
				std::atomic_store(&p.new_obs, newScan);
				std::atomic_store(&p.new_obs_imu, newIMU);
			}

			// React to a key forwarded from the UI thread, if any:
			const int key = p.pushed_key;
			if (key != 0)
			{
				if (key == 's')
				{
					// Tilt down one degree, clamped to the motor range:
					p.tilt_ang_deg -= 1;
					if (p.tilt_ang_deg < -31) p.tilt_ang_deg = -31;
					kinect.setTiltAngleDegrees(p.tilt_ang_deg);
				}
				else if (key == 'w')
				{
					// Tilt up one degree, clamped to the motor range:
					p.tilt_ang_deg += 1;
					if (p.tilt_ang_deg > 31) p.tilt_ang_deg = 31;
					kinect.setTiltAngleDegrees(p.tilt_ang_deg);
				}
				else if (key == 'c')
				{
					// Toggle between the RGB and IR video channels:
					kinect.setVideoChannel(
						kinect.getVideoChannel() == CKinect::VIDEO_CHANNEL_RGB
							? CKinect::VIDEO_CHANNEL_IR
							: CKinect::VIDEO_CHANNEL_RGB);
				}
				else if (key == 27)  // ESC
				{
					p.quit = true;
				}
				// Acknowledge the key so it is not processed twice:
				p.pushed_key = 0;
			}

			// Refresh the rate estimate in batches of >10 frames:
			framesCounted++;
			if (framesCounted > 10)
			{
				p.Hz = framesCounted / rateClock.Tac();
				framesCounted = 0;
				rateClock.Tic();
			}
		}
	}
	catch (const std::exception& e)
	{
		cout << "Exception in Kinect thread: " << mrpt::exception_to_str(e)
			 << endl;
		p.quit = true;
	}
}