/*! @brief the main processing callback of the ECTO pipeline
 *
 * This method is called once all input dependencies are satisfied.
 * The PartsBasedDetector has two input dependencies: a color image and a
 * depth image, both retrieved from the Kinect. If any detection candidates
 * are found, their bounding boxes, detection confidences and object ids
 * are returned.
 *
 * @param inputs the input tendrils
 * @param outputs the output tendrils
 * @return ecto::OK
 */
int process(const tendrils& inputs, const tendrils& outputs)
{
  std::cout << "detector: process" << std::endl;
  pose_results_->clear();

  // build a pinhole camera model from the color camera intrinsics
  image_pipeline::PinholeCameraModel camera_model;
  camera_model.setParams(color_->size(), *camera_intrinsics_, cv::Mat(),
                         cv::Mat(), cv::Mat());

  // run the parts-based detector on the color/depth image pair
  std::vector<Candidate> candidates;
  detector_->detect(*color_, *depth_, candidates);

  // no detections: optionally show the color-converted input and return early
  if (candidates.empty())
  {
    if (*visualize_)
    {
      cv::cvtColor(*color_, *output_, CV_RGB2BGR);
      //cv::waitKey(30);
    }
    return ecto::OK;
  }

  // sort the candidates by score and suppress overlapping detections
  Candidate::sort(candidates);
  Candidate::nonMaximaSuppression(*color_, candidates, *max_overlap_);

  if (*visualize_)
  {
    visualizer_->candidates(*color_, candidates, candidates.size(),
                            *output_, true);
    cv::cvtColor(*output_, *output_, CV_RGB2BGR);
    //cv::waitKey(30);
  }

  // project the 2D detections into 3D bounding boxes and part centers
  std::vector<Rect3d> bounding_boxes;
  std::vector<PointCloud> parts_centers;
  typename PointCloudClusterer<PointType>::PointProjectFunc projecter =
      boost::bind(&PartsBasedDetectorCell::projectPixelToRay, this,
                  camera_model, _1);
  PointCloudClusterer<PointType>::computeBoundingBoxes(candidates, *color_,
      *depth_, projecter, *input_cloud_, bounding_boxes, parts_centers);

  // output clusters
  std::vector<PointType> object_centers;
  std::vector<PointCloud> clusters;

  // remove planes from the input cloud if needed, then cluster the points
  // falling inside each bounding box
  if (*remove_planes_)
  {
    PointCloud::Ptr clusterer_cloud(new PointCloud());
    PointCloudClusterer<PointType>::organizedMultiplaneSegmentation(
        *input_cloud_, *clusterer_cloud);
    PointCloudClusterer<PointType>::clusterObjects(clusterer_cloud,
        bounding_boxes, clusters, object_centers);
  }
  else
  {
    PointCloudClusterer<PointType>::clusterObjects(*input_cloud_,
        bounding_boxes, clusters, object_centers);
  }

  // compute poses (centroid of part centers) for each object
  for (size_t object_it = 0; object_it < candidates.size(); ++object_it)
  {
    // skip objects whose cluster center could not be computed
    if (std::isnan(object_centers[object_it].x) ||
        std::isnan(object_centers[object_it].y) ||
        std::isnan(object_centers[object_it].z))
      continue;

    PoseResult result;

    // no db for now, only one model
    result.set_object_id(*object_db_, model_name_);
    result.set_confidence(candidates[object_it].score());

    // use the clustered cloud's center as the object translation
    result.set_T(Eigen::Vector3f(object_centers[object_it].getVector3fMap()));

//    // For the rotation a minimum of two parts is needed
//    if (part_centers_cloud.size() >= 2 &&
//        !pcl_isnan(part_centers_cloud[0].x) &&
//        !pcl_isnan(part_centers_cloud[0].y) &&
//        !pcl_isnan(part_centers_cloud[0].z) &&
//        !pcl_isnan(part_centers_cloud[1].x) &&
//        !pcl_isnan(part_centers_cloud[1].y) &&
//        !pcl_isnan(part_centers_cloud[1].z))
//    {
//      Eigen::Vector3f center(centroid.block<3, 1>(0, 0));
//
//      Eigen::Vector3f x_axis(
//          part_centers_cloud[0].getVector3fMap() - center);
//      x_axis.normalize();
//      Eigen::Vector3f z_axis =
//          (x_axis.cross(
//              part_centers_cloud[1].getVector3fMap() - center)).normalized();
//
//      Eigen::Vector3f y_axis = x_axis.cross(z_axis); // should be normalized
//
//      Eigen::Matrix3f rot_matr;
//      rot_matr << z_axis, y_axis, -x_axis;
//      //rot_matr.transposeInPlace();
//
//      result.set_R(rot_matr);
//    }
//    else
    {
      // rotation estimation from part centers is disabled above;
      // fall back to the identity quaternion
      result.set_R(Eigen::Quaternionf(1, 0, 0, 0));
    }

    // only one point of view for this object
    sensor_msgs::PointCloud2Ptr cluster_cloud(new sensor_msgs::PointCloud2());
    std::vector<sensor_msgs::PointCloud2ConstPtr> ros_clouds(1);
    pcl::toROSMsg(clusters[object_it], *cluster_cloud);
    ros_clouds[0] = cluster_cloud;
    result.set_clouds(ros_clouds);

    // per-object copy of the cluster (currently unused)
    std::vector<PointCloud, Eigen::aligned_allocator<PointCloud> > object_cluster(1);
    object_cluster[0] = clusters[object_it];

    pose_results_->push_back(result);
  }

  return ecto::OK;
}
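
// For reference, the projecter functor bound in process() points at a
// projectPixelToRay member that is not part of this excerpt. Below is a
// minimal sketch of that helper, assuming image_pipeline::PinholeCameraModel
// mirrors the projectPixelTo3dRay() API of image_geometry::PinholeCameraModel
// and that PointProjectFunc maps a cv::Point2d pixel to a cv::Point3d ray;
// both are assumptions, not the package's confirmed interface.
cv::Point3d projectPixelToRay(image_pipeline::PinholeCameraModel camera_model,
                              cv::Point2d pixel)
{
  // direction of the 3D ray through the camera center that projects
  // onto the given rectified pixel (assumed API, see note above)
  return camera_model.projectPixelTo3dRay(pixel);
}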