/** @brief Process and/or display davidSDK grabber images
 * @param[in] image davidSDK image
 */
void
grabberCallback (const boost::shared_ptr<pcl::PCLImage>& image)
{
  // Guard against an empty frame: taking &data[0] on an empty vector is
  // undefined behavior, and OpenCV cannot display a zero-sized buffer anyway.
  if (image->data.empty ())
    return;

  unsigned char *image_array = reinterpret_cast<unsigned char *> (&image->data[0]);

  // Map the encoding string to the matching OpenCV matrix type.
  int type = getOpenCVType (image->encoding);

  // Wrap the raw buffer without copying; cv_image is only valid while `image` lives.
  cv::Mat cv_image (image->height, image->width, type, image_array);

  cv::imshow ("davidSDK images", cv_image);
  cv::waitKey (5);
}
/** @brief Process and/or display Ensenso grabber images
 * @param[in] cloud The Ensenso point cloud
 * @param[in] images Pair of Ensenso images (raw or with overlay)
 * @warning Image type changes if a calibration pattern is discovered/lost;
 * check @c images->first.encoding
 */
void
grabberCallback (const boost::shared_ptr<PointCloudT>& cloud,
                 const boost::shared_ptr<PairOfImages>& images)
{
  viewer_ptr->showCloud (cloud);

  // Guard against empty frames: &data[0] on an empty vector is undefined behavior.
  if (images->first.data.empty () || images->second.data.empty ())
    return;

  unsigned char *l_image_array = reinterpret_cast<unsigned char *> (&images->first.data[0]);
  unsigned char *r_image_array = reinterpret_cast<unsigned char *> (&images->second.data[0]);

  std::cout << "Encoding: " << images->first.encoding << std::endl;
  int type = getOpenCVType (images->first.encoding);

  cv::Mat l_image (images->first.height, images->first.width, type, l_image_array);
  // FIX: describe the right image with its OWN metadata instead of reusing the
  // left image's dimensions (identical for a well-formed stereo pair, correct
  // if the two images ever differ).
  cv::Mat r_image (images->second.height, images->second.width, type, r_image_array);

  // Compose the pair side by side: shrink the ROI to the left half, copy the
  // left image, slide the ROI to the right half, copy the right image, then
  // restore the full view before displaying.
  cv::Mat im (images->first.height, images->first.width * 2, type);
  im.adjustROI (0, 0, 0, -images->first.width);
  l_image.copyTo (im);
  im.adjustROI (0, 0, -images->first.width, images->first.width);
  r_image.copyTo (im);
  im.adjustROI (0, 0, images->first.width, 0);

  cv::imshow ("Ensenso images", im);
  cv::waitKey (10);
}
void pcl::EnsensoGrabber::processGrabbing () { bool continue_grabbing = running_; while (continue_grabbing) { try { // Publish cloud / images if (num_slots<sig_cb_ensenso_point_cloud> () > 0 || num_slots<sig_cb_ensenso_images> () > 0 || num_slots<sig_cb_ensenso_point_cloud_images> () > 0) { pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>); boost::shared_ptr<PairOfImages> images (new PairOfImages); fps_mutex_.lock (); frequency_.event (); fps_mutex_.unlock (); NxLibCommand (cmdCapture).execute (); double timestamp; camera_[itmImages][itmRaw][itmLeft].getBinaryDataInfo (0, 0, 0, 0, 0, ×tamp); // Gather images if (num_slots<sig_cb_ensenso_images> () > 0 || num_slots<sig_cb_ensenso_point_cloud_images> () > 0) { // Rectify images NxLibCommand (cmdRectifyImages).execute (); int width, height, channels, bpe; bool isFlt, collected_pattern = false; try // Try to collect calibration pattern, if not possible, publish RAW images instead { NxLibCommand collect_pattern (cmdCollectPattern); collect_pattern.parameters ()[itmBuffer].set (false); // Do NOT store the pattern into the buffer! collect_pattern.execute (); collected_pattern = true; } catch (const NxLibException &ex) { // We failed to collect the pattern but the RAW images are available! 
} if (collected_pattern) { camera_[itmImages][itmWithOverlay][itmLeft].getBinaryDataInfo (&width, &height, &channels, &bpe, &isFlt, 0); images->first.header.stamp = images->second.header.stamp = getPCLStamp (timestamp); images->first.width = images->second.width = width; images->first.height = images->second.height = height; images->first.data.resize (width * height * sizeof(float)); images->second.data.resize (width * height * sizeof(float)); images->first.encoding = images->second.encoding = getOpenCVType (channels, bpe, isFlt); camera_[itmImages][itmWithOverlay][itmLeft].getBinaryData (images->first.data.data (), images->first.data.size (), 0, 0); camera_[itmImages][itmWithOverlay][itmRight].getBinaryData (images->second.data.data (), images->second.data.size (), 0, 0); } else { camera_[itmImages][itmRaw][itmLeft].getBinaryDataInfo (&width, &height, &channels, &bpe, &isFlt, 0); images->first.header.stamp = images->second.header.stamp = getPCLStamp (timestamp); images->first.width = images->second.width = width; images->first.height = images->second.height = height; images->first.data.resize (width * height * sizeof(float)); images->second.data.resize (width * height * sizeof(float)); images->first.encoding = images->second.encoding = getOpenCVType (channels, bpe, isFlt); camera_[itmImages][itmRaw][itmLeft].getBinaryData (images->first.data.data (), images->first.data.size (), 0, 0); camera_[itmImages][itmRaw][itmRight].getBinaryData (images->second.data.data (), images->second.data.size (), 0, 0); } } // Gather point cloud if (num_slots<sig_cb_ensenso_point_cloud> () > 0 || num_slots<sig_cb_ensenso_point_cloud_images> () > 0) { // Stereo matching task NxLibCommand (cmdComputeDisparityMap).execute (); // Convert disparity map into XYZ data for each pixel NxLibCommand (cmdComputePointMap).execute (); // Get info about the computed point map and copy it into a std::vector std::vector<float> pointMap; int width, height; 
camera_[itmImages][itmPointMap].getBinaryDataInfo (&width, &height, 0, 0, 0, 0); camera_[itmImages][itmPointMap].getBinaryData (pointMap, 0); // Copy point cloud and convert in meters cloud->header.stamp = getPCLStamp (timestamp); cloud->points.resize (height * width); cloud->width = width; cloud->height = height; cloud->is_dense = false; // Copy data in point cloud (and convert milimeters in meters) for (size_t i = 0; i < pointMap.size (); i += 3) { cloud->points[i / 3].x = pointMap[i] / 1000.0; cloud->points[i / 3].y = pointMap[i + 1] / 1000.0; cloud->points[i / 3].z = pointMap[i + 2] / 1000.0; } } // Publish signals if (num_slots<sig_cb_ensenso_point_cloud_images> () > 0) point_cloud_images_signal_->operator () (cloud, images); else if (num_slots<sig_cb_ensenso_point_cloud> () > 0) point_cloud_signal_->operator () (cloud); else if (num_slots<sig_cb_ensenso_images> () > 0) images_signal_->operator () (images); } continue_grabbing = running_; } catch (NxLibException &ex) { ensensoExceptionHandling (ex, "processGrabbing"); } } }