template<typename PointT>
std::vector<bool>
MultiviewRecognizerWithChangeDetection<PointT>::getHypothesisInViewsMask(ModelTPtr model, const Eigen::Matrix4f &pose, size_t origin_id) {

    std::vector<size_t> sorted_view_ids = getMapKeys(views_);
    std::sort(sorted_view_ids.begin(), sorted_view_ids.end());

    size_t hypothesis_removed_since_view = id_ + 1; // assuming never removed (id_+1 = future)
    for(std::vector<size_t>::iterator view_id = sorted_view_ids.begin(); view_id < sorted_view_ids.end(); view_id++) {
        if(*view_id <= origin_id) {
            continue; // hypothesis can not be removed before discovered
        }
        if(removed_points_history_[*view_id]->empty()) {
            continue; // no changes were found in this view
        }

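        // align the model by the hypothesised pose and count how many of the points removed
        // in this view lie on it (support for removing the hypothesis)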
        CloudPtr model_aligned(new Cloud);
        pcl::transformPointCloud(*model->assembled_, *model_aligned, pose);
        int rem_support = v4r::ChangeDetector<PointT>::removalSupport(
                              removed_points_history_[*view_id], model_aligned, param_.tolerance_for_cloud_diff_, 1.1)->size();

        PCL_INFO("Support for the removal of object [%s,%zu]: %d\n", model->id_.c_str(), *view_id, rem_support);
        if(rem_support > param_.min_points_for_hyp_removal_) {
            hypothesis_removed_since_view = *view_id;
        }
    }

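    // the hypothesis is considered present only in views from the one it originates in
    // up to (but excluding) the first view with sufficient removal support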
    std::vector<bool> mask;
    for(const auto & view : views_) {
        mask.push_back(view.first >= origin_id && view.first < hypothesis_removed_since_view);
    }
    return mask;
}
Example #2
  template<template<class> class Distance, typename PointInT, typename FeatureT>
  void
  pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::recognize ()
  {

    models_.reset (new std::vector<ModelT>);
    transforms_.reset (new std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> >);

    PointInTPtr processed (new pcl::PointCloud<PointInT>);
    PointInTPtr in (new pcl::PointCloud<PointInT>);

    std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
    std::vector < Eigen::Vector3d > centroids;

    if (indices_.size ())
      pcl::copyPointCloud (*input_, indices_, *in);
    else
      in = input_;

    {
      pcl::ScopeTime t ("Estimate feature");
      micvfh_estimator_->estimate (in, processed, signatures, centroids);
    }

    std::vector<index_score> indices_scores;
    descriptor_distances_.clear ();

    if (signatures.size () > 0)
    {

      {
        pcl::ScopeTime t_matching ("Matching and roll...");

        if (use_single_categories_ && (categories_to_be_searched_.size () > 0))
        {

          //perform search of the different signatures in the categories_to_be_searched_
          for (size_t c = 0; c < categories_to_be_searched_.size (); c++)
          {
            std::cout << "Using category:" << categories_to_be_searched_[c] << std::endl;
            for (size_t idx = 0; idx < signatures.size (); idx++)
            {
              /*double* hist = signatures[idx].points[0].histogram;
               std::vector<double> std_hist (hist, hist + getHistogramLength (dummy));
               flann_model histogram ("", std_hist);
               flann::Matrix<int> indices;
               flann::Matrix<double> distances;

               std::map<std::string, int>::iterator it;
               it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);

               assert (it != category_to_vectors_indices_.end ());
               nearestKSearch (single_categories_index_[it->second], histogram, nmodels_, indices, distances);*/

              double* hist = signatures[idx].points[0].histogram;
              int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(double);
              std::vector<double> std_hist (hist, hist + size_feat);
              //ModelT empty;

              flann_model histogram;
              histogram.descr = std_hist;
              flann::Matrix<int> indices;
              flann::Matrix<double> distances;

              std::map<std::string, int>::iterator it;
              it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);
              assert (it != category_to_vectors_indices_.end ());

              nearestKSearch (single_categories_index_[it->second], histogram, NN_, indices, distances);
              //gather NN-search results
              double score = 0;
              for (size_t i = 0; i < NN_; ++i)
              {
                score = distances[0][i];
                index_score is;
                is.idx_models_ = single_categories_pointers_to_models_[it->second]->at (indices[0][i]);
                is.idx_input_ = static_cast<int> (idx);
                is.score_ = score;
                indices_scores.push_back (is);
              }
            }

            //we cannot keep more than NN_ models per category, so sort here and drop the excess ones...
            std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
            indices_scores.resize ((c + 1) * NN_);
          }
        }
        else
        {
          for (size_t idx = 0; idx < signatures.size (); idx++)
          {

            double* hist = signatures[idx].points[0].histogram;
            int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(double);
            std::vector<double> std_hist (hist, hist + size_feat);
            //ModelT empty;

            flann_model histogram;
            histogram.descr = std_hist;

            flann::Matrix<int> indices;
            flann::Matrix<double> distances;
            nearestKSearch (flann_index_, histogram, NN_, indices, distances);

            //gather NN-search results
            double score = 0;
            for (int i = 0; i < NN_; ++i)
            {
              score = distances[0][i];
              index_score is;
              is.idx_models_ = indices[0][i];
              is.idx_input_ = static_cast<int> (idx);
              is.score_ = score;
              indices_scores.push_back (is);

              //ModelT m = flann_models_[indices[0][i]].model;
            }
          }
        }

        std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);

        /*
         * There might be duplicated candidates; in those cases it makes sense to keep
         * the one that is closer in descriptor space
         */

        typename std::map<flann_model, bool> found;
        typename std::map<flann_model, bool>::iterator it_map;
        for (size_t i = 0; i < indices_scores.size (); i++)
        {
          flann_model m = flann_models_[indices_scores[i].idx_models_];
          it_map = found.find (m);
          if (it_map == found.end ())
          {
            indices_scores[found.size ()] = indices_scores[i];
            found[m] = true;
          }
        }
        indices_scores.resize (found.size ());

        int num_n = std::min (NN_, static_cast<int> (indices_scores.size ()));

        /*
         * Filter some hypotheses according to their distance to the first neighbour
         */

        /*std::vector<index_score> indices_scores_filtered;
         indices_scores_filtered.resize (num_n);
         indices_scores_filtered[0] = indices_scores[0];

         double best_score = indices_scores[0].score_;
         int kept = 1;
         for (int i = 1; i < num_n; ++i)
         {
         //std::cout << best_score << indices_scores[i].score_ << (best_score / indices_scores[i].score_) << std::endl;
         if ((best_score / indices_scores[i].score_) > 0.65)
         {
         indices_scores_filtered[i] = indices_scores[i];
         kept++;
         }

         //best_score = indices_scores[i].score_;
         }

         indices_scores_filtered.resize (kept);
         //std::cout << indices_scores_filtered.size () << " ยง " << num_n << std::endl;

         indices_scores = indices_scores_filtered;
         num_n = indices_scores.size ();*/
        std::cout << "Number of object hypotheses... " << num_n << std::endl;

        std::vector<bool> valid_trans;
        std::vector < Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > transformations;

        micvfh_estimator_->getValidTransformsVec (valid_trans);
        micvfh_estimator_->getTransformsVec (transformations);

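        // compose a full object-to-camera transform for each of the top candidates from its
        // stored roll pose and view pose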
        for (int i = 0; i < num_n; ++i)
        {
          ModelT m = flann_models_[indices_scores[i].idx_models_].model;
          int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
          int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;

          int idx_input = indices_scores[i].idx_input_;

          std::cout << m.class_ << "/" << m.id_ << " ==> " << indices_scores[i].score_ << std::endl;

          Eigen::Matrix4d roll_view_pose;
          bool roll_pose_found = getRollPose (m, view_id, desc_id, roll_view_pose);

          if (roll_pose_found && valid_trans[idx_input])
          {
            Eigen::Matrix4d transposed = roll_view_pose.transpose ();

            //std::cout << transposed << std::endl;

            PointInTPtr view;
            getView (m, view_id, view);

            Eigen::Matrix4d model_view_pose;
            getPose (m, view_id, model_view_pose);

            Eigen::Matrix4d scale_mat;
            scale_mat.setIdentity (4, 4);

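            // optional scale estimation: the ratio of the maximal centroid-to-point distance of the
            // input segment to that of the matched training view gives the scale factor, which is
            // applied about the input centroid (scale_mat = T(c) * S * T(-c))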
            if (compute_scale_)
            {
              //compute scale using the whole view
              PointInTPtr view_transformed (new pcl::PointCloud<PointInT>);
              Eigen::Matrix4d hom_from_OVC_to_CC;
              hom_from_OVC_to_CC = transformations[idx_input].inverse () * transposed;
              pcl::transformPointCloud (*view, *view_transformed, hom_from_OVC_to_CC);

              Eigen::Vector3d input_centroid = centroids[indices_scores[i].idx_input_];
              Eigen::Vector3d view_centroid;
              getCentroid (m, view_id, desc_id, view_centroid);

              Eigen::Vector4d cmatch4f (view_centroid[0], view_centroid[1], view_centroid[2], 0);
              Eigen::Vector4d cinput4f (input_centroid[0], input_centroid[1], input_centroid[2], 0);

              Eigen::Vector4d max_pt_input;
              pcl::getMaxDistance (*processed, cinput4f, max_pt_input);
              max_pt_input[3] = 0;
              double max_dist_input = (cinput4f - max_pt_input).norm ();

              //compute max dist for transformed model_view
              pcl::getMaxDistance (*view, cmatch4f, max_pt_input);
              max_pt_input[3] = 0;
              double max_dist_view = (cmatch4f - max_pt_input).norm ();

              cmatch4f = hom_from_OVC_to_CC * cmatch4f;
              std::cout << max_dist_view << " " << max_dist_input << std::endl;

              double scale_factor_view = max_dist_input / max_dist_view;
              std::cout << "Scale factor:" << scale_factor_view << std::endl;

              Eigen::Matrix4d center, center_inv;

              center.setIdentity (4, 4);
              center (0, 3) = -cinput4f[0];
              center (1, 3) = -cinput4f[1];
              center (2, 3) = -cinput4f[2];

              center_inv.setIdentity (4, 4);
              center_inv (0, 3) = cinput4f[0];
              center_inv (1, 3) = cinput4f[1];
              center_inv (2, 3) = cinput4f[2];

              scale_mat (0, 0) = scale_factor_view;
              scale_mat (1, 1) = scale_factor_view;
              scale_mat (2, 2) = scale_factor_view;

              scale_mat = center_inv * scale_mat * center;
            }

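            // final object-to-camera transform: stored model view pose, transposed roll pose,
            // inverse of the input roll transform, and the optional scale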
            Eigen::Matrix4d hom_from_OC_to_CC;
            hom_from_OC_to_CC = scale_mat * transformations[idx_input].inverse () * transposed * model_view_pose;

            models_->push_back (m);
            transforms_->push_back (hom_from_OC_to_CC);
            descriptor_distances_.push_back (static_cast<double> (indices_scores[i].score_));
          }
          else
          {
            PCL_WARN("The roll pose was not found, should use CRH here... \n");
          }
        }
      }

      std::cout << "Number of object hypotheses:" << models_->size () << std::endl;

      /**
       * POSE REFINEMENT
       **/

      if (ICP_iterations_ > 0)
      {
        pcl::ScopeTime t ("Pose refinement");

        //Prepare scene and model clouds for the pose refinement step
        double VOXEL_SIZE_ICP_ = 0.005;
        PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());

        {
          pcl::ScopeTime t ("Voxelize stuff...");
          pcl::VoxelGrid<PointInT> voxel_grid_icp;
          voxel_grid_icp.setInputCloud (processed);
          voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
          voxel_grid_icp.filter (*cloud_voxelized_icp);
          source_->voxelizeAllModels (VOXEL_SIZE_ICP_);
        }

#pragma omp parallel for num_threads(omp_get_num_procs())
        for (int i = 0; i < static_cast<int> (models_->size ()); i++)
        {

          ConstPointInTPtr model_cloud;
          PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);

          if (compute_scale_)
          {
            model_cloud = models_->at (i).getAssembled (-1);
            PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
            pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
            pcl::VoxelGrid<PointInT> voxel_grid_icp;
            voxel_grid_icp.setInputCloud (model_aligned_m);
            voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
            voxel_grid_icp.filter (*model_aligned);
          }
          else
          {
            model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
          }

          pcl::IterativeClosestPoint<PointInT, PointInT> reg;
          reg.setInputSource (model_aligned); //model
          reg.setInputTarget (cloud_voxelized_icp); //scene
          reg.setMaximumIterations (ICP_iterations_);
          reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 3.);
          reg.setTransformationEpsilon (1e-6);

          typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
          reg.align (*output_);

          Eigen::Matrix4d icp_trans = reg.getFinalTransformation ().template cast<double> ();
          transforms_->at (i) = icp_trans * transforms_->at (i);
        }
      }

      /**
       * HYPOTHESES VERIFICATION
       **/

      if (hv_algorithm_)
      {

        pcl::ScopeTime t ("HYPOTHESES VERIFICATION");

        std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
        aligned_models.resize (models_->size ());

        for (size_t i = 0; i < models_->size (); i++)
        {
          ConstPointInTPtr model_cloud;
          PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);

          if (compute_scale_)
          {
            model_cloud = models_->at (i).getAssembled (-1);
            PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
            pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
            pcl::VoxelGrid<PointInT> voxel_grid_icp;
            voxel_grid_icp.setInputCloud (model_aligned_m);
            voxel_grid_icp.setLeafSize (0.005, 0.005, 0.005);
            voxel_grid_icp.filter (*model_aligned);
          }
          else
          {
            model_cloud = models_->at (i).getAssembled (0.005);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
          }

          //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.005);
          //PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
          //pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
          aligned_models[i] = model_aligned;
        }

        std::vector<bool> mask_hv;
        hv_algorithm_->setSceneCloud (input_);
        hv_algorithm_->addModels (aligned_models, true);
        hv_algorithm_->verify ();
        hv_algorithm_->getMask (mask_hv);

        boost::shared_ptr < std::vector<ModelT> > models_temp;
        boost::shared_ptr < std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > > transforms_temp;

        models_temp.reset (new std::vector<ModelT>);
        transforms_temp.reset (new std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> >);

        for (size_t i = 0; i < models_->size (); i++)
        {
          if (!mask_hv[i])
            continue;

          models_temp->push_back (models_->at (i));
          transforms_temp->push_back (transforms_->at (i));
        }

        models_ = models_temp;
        transforms_ = transforms_temp;
      }

    }
  }
Example #3
template<template<class> class Distance, typename PointInT, typename FeatureT>
void
pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::recognize ()
{

    models_.reset (new std::vector<ModelT>);
    transforms_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);

    PointInTPtr processed;
    typename pcl::PointCloud<FeatureT>::Ptr signatures (new pcl::PointCloud<FeatureT> ());
    //pcl::PointCloud<int> keypoints_input;
    PointInTPtr keypoints_pointcloud;

    if (signatures_ != 0 && processed_ != 0 && (signatures_->size () == keypoints_input_->points.size ()))
    {
        keypoints_pointcloud = keypoints_input_;
        signatures = signatures_;
        processed = processed_;
        std::cout << "Using the ISPK ..." << std::endl;
    }
    else
    {
        processed.reset (new pcl::PointCloud<PointInT>);
        if (indices_.size () > 0)
        {
            PointInTPtr sub_input (new pcl::PointCloud<PointInT>);
            pcl::copyPointCloud (*input_, indices_, *sub_input);
            estimator_->estimate (sub_input, processed, keypoints_pointcloud, signatures);
        }
        else
        {
            estimator_->estimate (input_, processed, keypoints_pointcloud, signatures);
        }

        processed_ = processed;

    }

    std::cout << "Number of keypoints:" << keypoints_pointcloud->points.size () << std::endl;

    int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);

    //feature matching and object hypotheses
    std::map<std::string, ObjectHypothesis> object_hypotheses;

    {
        for (size_t idx = 0; idx < signatures->points.size (); idx++)
        {
            float* hist = signatures->points[idx].histogram;
            std::vector<float> std_hist (hist, hist + size_feat);
            flann_model histogram;
            histogram.descr = std_hist;
            flann::Matrix<int> indices;
            flann::Matrix<float> distances;
            nearestKSearch (flann_index_, histogram, 1, indices, distances);

            //read view pose and keypoint coordinates, transform keypoint coordinates to model coordinates
            Eigen::Matrix4f homMatrixPose;
            getPose (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, homMatrixPose);

            typename pcl::PointCloud<PointInT>::Ptr keypoints (new pcl::PointCloud<PointInT> ());
            getKeypoints (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, keypoints);

            PointInT view_keypoint = keypoints->points[flann_models_.at (indices[0][0]).keypoint_id];
            PointInT model_keypoint;
            model_keypoint.getVector4fMap () = homMatrixPose.inverse () * view_keypoint.getVector4fMap ();

            typename std::map<std::string, ObjectHypothesis>::iterator it_map;
            if ((it_map = object_hypotheses.find (flann_models_.at (indices[0][0]).model.id_)) != object_hypotheses.end ())
            {
                //if the object hypothesis already exists, then add information
                ObjectHypothesis oh = (*it_map).second;
                oh.correspondences_pointcloud->points.push_back (model_keypoint);
                oh.correspondences_to_inputcloud->push_back (
                            pcl::Correspondence (static_cast<int> (oh.correspondences_pointcloud->points.size () - 1),
                                                 static_cast<int> (idx), distances[0][0]));
                oh.feature_distances_->push_back (distances[0][0]);

            }
            else
            {
                //create object hypothesis
                ObjectHypothesis oh;

                typename pcl::PointCloud<PointInT>::Ptr correspondences_pointcloud (new pcl::PointCloud<PointInT> ());
                correspondences_pointcloud->points.push_back (model_keypoint);

                oh.model_ = flann_models_.at (indices[0][0]).model;
                oh.correspondences_pointcloud = correspondences_pointcloud;
                //the last keypoint added for this model corresponds to the current scene keypoint

                pcl::CorrespondencesPtr corr (new pcl::Correspondences ());
                oh.correspondences_to_inputcloud = corr;
                oh.correspondences_to_inputcloud->push_back (pcl::Correspondence (0, static_cast<int> (idx), distances[0][0]));

                boost::shared_ptr < std::vector<float> > feat_dist (new std::vector<float>);
                feat_dist->push_back (distances[0][0]);

                oh.feature_distances_ = feat_dist;
                object_hypotheses[oh.model_.id_] = oh;
            }
        }
    }

    typename std::map<std::string, ObjectHypothesis>::iterator it_map;

    std::vector<float> feature_distance_avg;

    {
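        // geometric verification: cluster each hypothesis' correspondences with the CG algorithm
        // and estimate one rigid transform (SVD) per consistent cluster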
        //pcl::ScopeTime t("Geometric verification, RANSAC and transform estimation");
        for (it_map = object_hypotheses.begin (); it_map != object_hypotheses.end (); it_map++)
        {
            std::vector < pcl::Correspondences > corresp_clusters;
            cg_algorithm_->setSceneCloud (keypoints_pointcloud);
            cg_algorithm_->setInputCloud ((*it_map).second.correspondences_pointcloud);
            cg_algorithm_->setModelSceneCorrespondences ((*it_map).second.correspondences_to_inputcloud);
            cg_algorithm_->cluster (corresp_clusters);

            std::cout << "Instances:" << corresp_clusters.size () << " Total correspondences:" << (*it_map).second.correspondences_to_inputcloud->size () << " " << it_map->first << std::endl;
            std::vector<bool> good_indices_for_hypothesis (corresp_clusters.size (), true);

            if (threshold_accept_model_hypothesis_ < 1.f)
            {
                //sort the hypotheses for each model according to their correspondences and take those that are threshold_accept_model_hypothesis_ over the max cardinality
                int max_cardinality = -1;
                for (size_t i = 0; i < corresp_clusters.size (); i++)
                {
                    //std::cout <<  (corresp_clusters[i]).size() << " -- " << (*(*it_map).second.correspondences_to_inputcloud).size() << std::endl;
                    if (max_cardinality < static_cast<int> (corresp_clusters[i].size ()))
                    {
                        max_cardinality = static_cast<int> (corresp_clusters[i].size ());
                    }
                }

                for (size_t i = 0; i < corresp_clusters.size (); i++)
                {
                    if (static_cast<float> ((corresp_clusters[i]).size ()) < (threshold_accept_model_hypothesis_ * static_cast<float> (max_cardinality)))
                    {
                        good_indices_for_hypothesis[i] = false;
                    }
                }
            }

            for (size_t i = 0; i < corresp_clusters.size (); i++)
            {

                if (!good_indices_for_hypothesis[i])
                    continue;

                //drawCorrespondences (processed, it_map->second, keypoints_pointcloud, corresp_clusters[i]);

                Eigen::Matrix4f best_trans;
                typename pcl::registration::TransformationEstimationSVD < PointInT, PointInT > t_est;
                t_est.estimateRigidTransformation (*(*it_map).second.correspondences_pointcloud, *keypoints_pointcloud, corresp_clusters[i], best_trans);

                models_->push_back ((*it_map).second.model_);
                transforms_->push_back (best_trans);

            }
        }
    }

    std::cout << "Number of hypotheses:" << models_->size() << std::endl;

    /**
     * POSE REFINEMENT
     **/

    if (ICP_iterations_ > 0)
    {
        pcl::ScopeTime ticp ("ICP ");

        //Prepare scene and model clouds for the pose refinement step
        PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());
        pcl::VoxelGrid<PointInT> voxel_grid_icp;
        voxel_grid_icp.setInputCloud (processed);
        voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
        voxel_grid_icp.filter (*cloud_voxelized_icp);
        source_->voxelizeAllModels (VOXEL_SIZE_ICP_);

#pragma omp parallel for schedule(dynamic,1) num_threads(omp_get_num_procs())
        for (int i = 0; i < static_cast<int>(models_->size ()); i++)
        {

            ConstPointInTPtr model_cloud;
            PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
            model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));

            typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT>::Ptr rej (
                        new pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT> ());

            rej->setInputTarget (cloud_voxelized_icp);
            rej->setMaximumIterations (1000);
            rej->setInlierThreshold (0.005f);
            rej->setInputSource (model_aligned);

            pcl::IterativeClosestPoint<PointInT, PointInT> reg;
            reg.addCorrespondenceRejector (rej);
            reg.setInputTarget (cloud_voxelized_icp); //scene
            reg.setInputSource (model_aligned); //model
            reg.setMaximumIterations (ICP_iterations_);
            reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 4.f);

            typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
            reg.align (*output_);

            Eigen::Matrix4f icp_trans = reg.getFinalTransformation ();
            transforms_->at (i) = icp_trans * transforms_->at (i);
        }
    }

    /**
     * HYPOTHESES VERIFICATION
     **/

    if (hv_algorithm_)
    {

        pcl::ScopeTime thv ("HV verification");

        std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
        aligned_models.resize (models_->size ());
        for (size_t i = 0; i < models_->size (); i++)
        {
            ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.0025f);
            //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
            PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
            aligned_models[i] = model_aligned;
        }

        std::vector<bool> mask_hv;
        hv_algorithm_->setSceneCloud (input_);
        hv_algorithm_->addModels (aligned_models, true);
        hv_algorithm_->verify ();
        hv_algorithm_->getMask (mask_hv);

        boost::shared_ptr < std::vector<ModelT> > models_temp;
        boost::shared_ptr < std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > > transforms_temp;

        models_temp.reset (new std::vector<ModelT>);
        transforms_temp.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);

        for (size_t i = 0; i < models_->size (); i++)
        {
            if (!mask_hv[i])
                continue;

            models_temp->push_back (models_->at (i));
            transforms_temp->push_back (transforms_->at (i));
        }

        models_ = models_temp;
        transforms_ = transforms_temp;

    }
}
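For orientation, both recognize () pipelines above (Examples #2 and #3) are driven the same way from the outside: hand the recognizer a scene cloud, run it, and read back the surviving model/pose pairs. The fragment below is only a sketch; the accessor names setInputCloud (), getModels () and getTransforms () follow the PCL 3d_rec_framework demo tools and are assumptions here, as is an already configured and trained recognizer.

// Usage sketch (assumptions: trained/configured recognizer; accessor names as above; C++11).
#include <iostream>

template <typename RecognizerT, typename CloudPtrT>
void
recognizeScene (RecognizerT & recognizer, const CloudPtrT & scene)
{
  recognizer.setInputCloud (scene);  // scene cloud to be searched for the trained models
  recognizer.recognize ();           // runs one of the pipelines shown above

  // surviving hypotheses: one (model, 4x4 object-to-camera pose) pair per index
  auto models = recognizer.getModels ();
  auto transforms = recognizer.getTransforms ();

  for (size_t i = 0; i < models->size (); i++)
    std::cout << "hypothesis " << i << ": " << models->at (i).id_ << "\n"
              << transforms->at (i) << std::endl;
}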
Example #4
template<typename PointT>
void
Recognizer<PointT>::poseRefinement()
{
    PointTPtr scene_voxelized (new pcl::PointCloud<PointT> ());
    pcl::VoxelGrid<PointT> voxel_grid_icp;
    voxel_grid_icp.setInputCloud (scene_);
    voxel_grid_icp.setLeafSize (param_.voxel_size_icp_, param_.voxel_size_icp_, param_.voxel_size_icp_);
    voxel_grid_icp.filter (*scene_voxelized);

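    // both ICP variants below work with this voxelized copy of the scene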
    switch (param_.icp_type_)
    {
    case 0:
    {
#pragma omp parallel for schedule(dynamic,1) num_threads(omp_get_num_procs())
        for (size_t i = 0; i < models_.size (); i++)
        {
//            std::cout << "Doing ICP (type 0) for model " << models_[i]->id_ << " (" << i << " / " << models_.size() << ")" << std::endl;
            ConstPointTPtr model_cloud = models_[i]->getAssembled ( param_.resolution_mm_model_assembly_ );
            PointTPtr model_aligned (new pcl::PointCloud<PointT>);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_[i]);

            typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::Ptr
                    rej (new pcl::registration::CorrespondenceRejectorSampleConsensus<PointT> ());

            rej->setInputTarget (scene_voxelized);
            rej->setMaximumIterations (1000);
            rej->setInlierThreshold (0.005f);
            rej->setInputSource (model_aligned);

            pcl::IterativeClosestPoint<PointT, PointT> reg;
            reg.addCorrespondenceRejector (rej);
            reg.setInputTarget (scene_voxelized);
            reg.setInputSource (model_aligned);
            reg.setMaximumIterations (param_.icp_iterations_);
            reg.setMaxCorrespondenceDistance (param_.max_corr_distance_);

            typename pcl::PointCloud<PointT>::Ptr output_ (new pcl::PointCloud<PointT> ());
            reg.align (*output_);

            Eigen::Matrix4f icp_trans = reg.getFinalTransformation ();
            transforms_[i] = icp_trans * transforms_[i];
        }
    }
        break;
    default:
    {
#pragma omp parallel for schedule(dynamic,1) num_threads(omp_get_num_procs())
        for (size_t i = 0; i < models_.size(); i++)
        {
//            std::cout << "Doing ICP for model " << models_[i]->id_ << " (" << i << " / " << models_.size() << ")" << std::endl;
            typename VoxelBasedCorrespondenceEstimation<PointT, PointT>::Ptr
                    est (new VoxelBasedCorrespondenceEstimation<PointT, PointT> ());

            typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::Ptr
                    rej (new pcl::registration::CorrespondenceRejectorSampleConsensus<PointT> ());

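            // this variant registers the cropped, voxelized scene into the model's voxelized
            // distance field (scene-to-model), so the resulting transform is inverted at the end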
            Eigen::Matrix4f scene_to_model_trans = transforms_[i].inverse ();
            boost::shared_ptr<distance_field::PropagationDistanceField<PointT> > dt;
            models_[i]->getVGDT (dt);

            PointTPtr model_aligned (new pcl::PointCloud<PointT>);
            PointTPtr scene_voxelized_icp_cropped (new pcl::PointCloud<PointT>);
            typename pcl::PointCloud<PointT>::ConstPtr cloud;
            dt->getInputCloud(cloud);
            model_aligned.reset(new pcl::PointCloud<PointT>(*cloud));

            pcl::transformPointCloud (*scene_voxelized, *scene_voxelized_icp_cropped, scene_to_model_trans);

            PointT minPoint, maxPoint;
            pcl::getMinMax3D(*cloud, minPoint, maxPoint);
            minPoint.x -= param_.max_corr_distance_;
            minPoint.y -= param_.max_corr_distance_;
            minPoint.z -= param_.max_corr_distance_;

            maxPoint.x += param_.max_corr_distance_;
            maxPoint.y += param_.max_corr_distance_;
            maxPoint.z += param_.max_corr_distance_;

            pcl::CropBox<PointT> cropFilter;
            cropFilter.setInputCloud (scene_voxelized_icp_cropped);
            cropFilter.setMin(minPoint.getVector4fMap());
            cropFilter.setMax(maxPoint.getVector4fMap());
            cropFilter.filter (*scene_voxelized_icp_cropped);

            est->setVoxelRepresentationTarget (dt);
            est->setInputSource (scene_voxelized_icp_cropped);
            est->setInputTarget (model_aligned);
            est->setMaxCorrespondenceDistance (param_.max_corr_distance_);

            rej->setInputSource (scene_voxelized_icp_cropped);
            rej->setInputTarget (model_aligned);
            rej->setMaximumIterations (1000);
            rej->setInlierThreshold (0.005f);

            pcl::IterativeClosestPoint<PointT, PointT, float> reg;
            reg.setCorrespondenceEstimation (est);
            reg.addCorrespondenceRejector (rej);
            reg.setInputTarget (model_aligned);
            reg.setInputSource (scene_voxelized_icp_cropped);
            reg.setMaximumIterations (param_.icp_iterations_);
            reg.setEuclideanFitnessEpsilon(1e-5);
            reg.setTransformationEpsilon(0.001f * 0.001f);

            pcl::registration::DefaultConvergenceCriteria<float>::Ptr convergence_criteria;
            convergence_criteria = reg.getConvergeCriteria();
            convergence_criteria->setAbsoluteMSE(1e-12);
            convergence_criteria->setMaximumIterationsSimilarTransforms(15);
            convergence_criteria->setFailureAfterMaximumIterations(false);

            typename pcl::PointCloud<PointT>::Ptr output_ (new pcl::PointCloud<PointT> ());
            reg.align (*output_);

            std::cout << "ICP: iterations: " << param_.icp_iterations_ << ", fitness_score: " << reg.getFitnessScore() << std::endl;

            Eigen::Matrix4f icp_trans;
            icp_trans = reg.getFinalTransformation () * scene_to_model_trans;
            transforms_[i] = icp_trans.inverse ();
            //        std::cout << "Done ICP for model  " << models_[i]->id_ << " (" << i << " / " << models_.size() << ")" << std::endl;

        }
    }
    }
}