Code example #1
	/*! @brief The main processing callback of the ecto pipeline.
	 *
	 * This method is called once all input dependencies are satisfied.
	 * The PartsBasedDetector has two input dependencies: a color image and a
	 * depth image, both retrieved from the Kinect. If any detection candidates
	 * are found, their bounding boxes, detection confidences and object ids
	 * are returned.
	 *
	 * @param inputs the input tendrils
	 * @param outputs the output tendrils
	 * @return ecto::OK on success
	 */
	int process(const tendrils& inputs, const tendrils& outputs)
	{
		std::cout << "detector: process" << std::endl;

		pose_results_->clear();

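		// Build a pinhole model from the color image size and the stored intrinsics
		// (the empty cv::Mat()s presumably stand for distortion/rectification/projection).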
		image_pipeline::PinholeCameraModel camera_model;
		camera_model.setParams(color_->size(), *camera_intrinsics_, cv::Mat(),
				cv::Mat(), cv::Mat());

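		// Run the parts-based detector on the registered color/depth pair.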
		std::vector<Candidate> candidates;
		detector_->detect(*color_, *depth_, candidates);

		if (candidates.empty())
		{
			if (*visualize_)
			{
				cv::cvtColor(*color_, *output_, CV_RGB2BGR);
				//cv::waitKey(30);
			}

			return ecto::OK;
		}

		Candidate::sort(candidates);
		Candidate::nonMaximaSuppression(*color_, candidates, *max_overlap_);

		if (*visualize_)
		{
			visualizer_->candidates(*color_, candidates, candidates.size(), *output_, true);
			cv::cvtColor(*output_, *output_, CV_RGB2BGR);
			//cv::waitKey(30);
		}

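		// Lift the 2D detections into 3D: project each candidate's parts through
		// the pinhole model to get bounding boxes and per-part center clouds.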
		std::vector<Rect3d> bounding_boxes;
		std::vector<PointCloud> parts_centers;

		typename PointCloudClusterer<PointType>::PointProjectFunc projecter =
				boost::bind(&PartsBasedDetectorCell::projectPixelToRay, this,
						camera_model, _1);
		PointCloudClusterer<PointType>::computeBoundingBoxes(candidates,
				*color_, *depth_, projecter, *input_cloud_, bounding_boxes,
				parts_centers);

		// output clusters
		std::vector<PointType> object_centers;
		std::vector<PointCloud> clusters;

		// remove planes from input cloud if needed
		if(*remove_planes_)
		{
			PointCloud::Ptr clusterer_cloud (new PointCloud());
			PointCloudClusterer<PointType>::organizedMultiplaneSegmentation(*input_cloud_, *clusterer_cloud);
			PointCloudClusterer<PointType>::clusterObjects(clusterer_cloud,
					bounding_boxes, clusters, object_centers);
		}
		else
		{
			PointCloudClusterer<PointType>::clusterObjects(*input_cloud_,
					bounding_boxes, clusters, object_centers);
		}

		// compute poses (centroid of part centers)

		// for each object
		for (size_t object_it = 0; object_it < candidates.size(); ++object_it)
		{
			// Skip objects whose clustered center could not be computed (NaN).
			if (std::isnan(object_centers[object_it].x)
					|| std::isnan(object_centers[object_it].y)
					|| std::isnan(object_centers[object_it].z))
				continue;

			PoseResult result;

			// no db for now, only one model
			result.set_object_id(*object_db_, model_name_);
			result.set_confidence(candidates[object_it].score());

			// use the clustered cloud's center as the object translation
			result.set_T(Eigen::Vector3f(object_centers[object_it].getVector3fMap()));

//			// For the rotation a minimum of two parts is needed
//			if (part_centers_cloud.size() >= 2 &&
//					!pcl_isnan(part_centers_cloud[0].x) &&
//					!pcl_isnan(part_centers_cloud[0].y) &&
//					!pcl_isnan(part_centers_cloud[0].z) &&
//					!pcl_isnan(part_centers_cloud[1].x) &&
//					!pcl_isnan(part_centers_cloud[1].y) &&
//					!pcl_isnan(part_centers_cloud[1].z))
//			{
//				Eigen::Vector3f center(centroid.block<3, 1>(0, 0));
//
//				Eigen::Vector3f x_axis(
//						part_centers_cloud[0].getVector3fMap() - center);
//				x_axis.normalize();
//				Eigen::Vector3f z_axis =
//						(x_axis.cross(
//								part_centers_cloud[1].getVector3fMap() - center)).normalized();
//
//				Eigen::Vector3f y_axis = x_axis.cross(z_axis); // should be normalized
//
//				Eigen::Matrix3f rot_matr;
//				rot_matr << z_axis, y_axis, -x_axis;
//				//rot_matr.transposeInPlace();
//
//				result.set_R(rot_matr);
//			}
//			else
			{
				// fall back to an identity orientation
				result.set_R(Eigen::Quaternionf(1, 0, 0, 0));
			}

			// Only one point of view for this object...
			sensor_msgs::PointCloud2Ptr cluster_cloud (new sensor_msgs::PointCloud2());
			std::vector<sensor_msgs::PointCloud2ConstPtr> ros_clouds (1);
			pcl::toROSMsg(clusters[object_it], *cluster_cloud);
			ros_clouds[0] = cluster_cloud;
			result.set_clouds(ros_clouds);

			// local copy of the cluster (currently unused beyond this point)
			std::vector<PointCloud, Eigen::aligned_allocator<PointCloud> > object_cluster (1);
			object_cluster[0] = clusters[object_it];

			pose_results_->push_back(result);
		}

		return ecto::OK;
	}
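
The commented-out block in the middle of this function sketches how an orientation could be recovered once at least two valid part centers are available. Pulled out as a self-contained Eigen snippet (a hypothetical helper, not something the cell currently calls), the same idea reads:

#include <Eigen/Core>
#include <Eigen/Geometry>

// Hypothetical helper mirroring the commented-out draft: x points from the
// object center to the first part, z is normal to the plane spanned by the
// two parts, and y completes the frame. Assumes the three points are distinct
// and not collinear.
Eigen::Matrix3f orientationFromParts(const Eigen::Vector3f& center,
                                     const Eigen::Vector3f& part0,
                                     const Eigen::Vector3f& part1)
{
  const Eigen::Vector3f x_axis = (part0 - center).normalized();
  const Eigen::Vector3f z_axis = x_axis.cross(part1 - center).normalized();
  const Eigen::Vector3f y_axis = x_axis.cross(z_axis); // unit by construction

  Eigen::Matrix3f rot;
  rot << z_axis, y_axis, -x_axis; // same column order as the draft above
  return rot;
}

Such a matrix would then be passed to result.set_R(...) in place of the identity quaternion fallback.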
Code example #2
    /** Recognize the objects in the table-frame clusters and compute their poses.
     * @param inputs the input tendrils
     * @param outputs the output tendrils
     * @return ecto::OK on success
     */
    int
    process(const tendrils& inputs, const tendrils& outputs)
    {
      std::vector<tabletop_object_detector::TabletopObjectRecognizer<pcl::PointXYZ>::TabletopResult > results;

      // Clear previous results, then process each table
      pose_results_->clear();

      std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters_merged;
      // Map each cluster back to the index of the table it came from
      std::map<pcl::PointCloud<pcl::PointXYZ>::Ptr, size_t> cluster_table;

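      // Per-table plane pose: rotation/translation taking table-frame
      // coordinates into the sensor frame.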
      std::vector<cv::Vec3f> translations(clusters_->size());
      std::vector<cv::Matx33f> rotations(clusters_->size());
      for (size_t table_index = 0; table_index < clusters_->size(); ++table_index)
      {
        getPlaneTransform((*table_coefficients_)[table_index], rotations[table_index], translations[table_index]);

        // Make the clusters be in the table frame
        size_t n_clusters = (*clusters_)[table_index].size();
        std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters(n_clusters);

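        // Invert the table pose so sensor-frame points can be expressed in the
        // table frame: p_table = R^T * (p_sensor - T).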
        cv::Matx33f Rinv = rotations[table_index].t();
        cv::Vec3f Tinv = -Rinv*translations[table_index];

        for (size_t cluster_index = 0; cluster_index < n_clusters; ++cluster_index)
        {
          clusters[cluster_index] = pcl::PointCloud<pcl::PointXYZ>::Ptr(new pcl::PointCloud<pcl::PointXYZ>());
          for(size_t i = 0; i < (*clusters_)[table_index][cluster_index].size(); ++i)
          {
            cv::Vec3f res = Rinv*(*clusters_)[table_index][cluster_index][i] + Tinv;
            clusters[cluster_index]->push_back(pcl::PointXYZ(res[0], res[1], res[2]));
          }
          cluster_table.insert(std::pair<pcl::PointCloud<pcl::PointXYZ>::Ptr, size_t>(clusters[cluster_index], table_index));
        }

        clusters_merged.insert(clusters_merged.end(), clusters.begin(), clusters.end());
      }

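      // Run the tabletop recognizer once over the clusters of all tables.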
      object_recognizer_.objectDetection(clusters_merged, confidence_cutoff_, perform_fit_merge_, results);

      for (size_t i = 0; i < results.size(); ++i)
      {
        const tabletop_object_detector::TabletopObjectRecognizer<pcl::PointXYZ>::TabletopResult & result = results[i];
        const size_t table_index = cluster_table[result.cloud_];

        PoseResult pose_result;

        // Add the object id
        std::stringstream ss;
        ss << result.object_id_;
        pose_result.set_object_id(db_, ss.str());

        // Add the pose
        const geometry_msgs::Pose &pose = result.pose_;
        cv::Vec3f T(pose.position.x, pose.position.y, pose.position.z);
        Eigen::Quaternionf quat(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z);

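        // Map the fitted pose from the table frame back into the sensor frame.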
        cv::Vec3f new_T = rotations[table_index] * T + translations[table_index];
        pose_result.set_T(cv::Mat(new_T));

        // Compose the table rotation with the fitted orientation: store the
        // quaternion first, then read it back as a matrix and rotate it.
        pose_result.set_R(quat);
        cv::Mat R = cv::Mat(rotations[table_index] * pose_result.R<cv::Matx33f>());
        pose_result.set_R(R);
        pose_result.set_confidence(result.confidence_);

        // Add the cluster of points
        std::vector<sensor_msgs::PointCloud2ConstPtr> ros_clouds (1);
        sensor_msgs::PointCloud2Ptr cluster_cloud (new sensor_msgs::PointCloud2());

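        // PCL >= 1.7 converts via pcl::PCLPointCloud2 (pcl_conversions); older
        // versions convert straight to the ROS message.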
#if PCL_VERSION_COMPARE(>=,1,7,0)
        ::pcl::PCLPointCloud2 pcd_tmp;
        ::pcl::toPCLPointCloud2(*result.cloud_, pcd_tmp);
        pcl_conversions::fromPCL(pcd_tmp, *cluster_cloud);
#else
        pcl::toROSMsg(*result.cloud_, *cluster_cloud);
#endif
        ros_clouds[0] = cluster_cloud;
        pose_result.set_clouds(ros_clouds);

        pose_results_->push_back(pose_result);
      }
      return ecto::OK;
    }
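
getPlaneTransform itself is not shown in this example. As a rough sketch of what such a plane-to-pose conversion typically does, assuming plane coefficients (a, b, c, d) with ax + by + cz + d = 0 (the helper below is an illustration, not the actual tabletop_object_detector implementation):

#include <cmath>
#include <opencv2/core/core.hpp>

// Hypothetical sketch: build a table frame whose z axis is the unit plane
// normal and whose origin is the point of the plane closest to the camera.
void planeToPose(const cv::Vec4f& coeffs, cv::Matx33f& R, cv::Vec3f& T)
{
  cv::Vec3f n(coeffs[0], coeffs[1], coeffs[2]);
  const float norm = static_cast<float>(cv::norm(n));
  n *= 1.0f / norm;                  // unit normal
  const float d = coeffs[3] / norm;  // signed distance of the camera origin

  // Closest point of the plane to the origin: n . p + d = 0 holds at p = -d n.
  T = n * -d;

  // Pick an x axis orthogonal to the normal, then complete a right-handed frame.
  cv::Vec3f up = std::abs(n[2]) < 0.9f ? cv::Vec3f(0.f, 0.f, 1.f)
                                       : cv::Vec3f(1.f, 0.f, 0.f);
  cv::Vec3f x = up.cross(n);
  x *= 1.0f / static_cast<float>(cv::norm(x));
  cv::Vec3f y = n.cross(x);

  for (int i = 0; i < 3; ++i)
  {
    R(i, 0) = x[i]; // columns are the table frame's x, y, z(=normal) axes
    R(i, 1) = y[i];
    R(i, 2) = n[i];
  }
}

With such a transform, the cluster loop above maps points into the table frame via Rinv = R.t() and Tinv = -Rinv * T, and the recognizer's poses are mapped back out with R and T.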