Example #1
File: gicp.hpp Project: 2php/pcl
template <typename PointSource, typename PointTarget> void
pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::estimateRigidTransformationBFGS (const PointCloudSource &cloud_src, 
                                                                                                  const std::vector<int> &indices_src, 
                                                                                                  const PointCloudTarget &cloud_tgt, 
                                                                                                  const std::vector<int> &indices_tgt, 
                                                                                                  Eigen::Matrix4f &transformation_matrix)
{
  if (indices_src.size () < 4)     // need at least 4 samples
  {
    PCL_THROW_EXCEPTION (NotEnoughPointsException, 
                         "[pcl::GeneralizedIterativeClosestPoint::estimateRigidTransformationBFGS] Need at least 4 points to estimate a transform! Source and target have " << indices_src.size () << " points!");
    return;
  }
  // Set the initial solution
  Vector6d x = Vector6d::Zero ();
  x[0] = transformation_matrix (0,3);
  x[1] = transformation_matrix (1,3);
  x[2] = transformation_matrix (2,3);
  x[3] = atan2 (transformation_matrix (2,1), transformation_matrix (2,2));
  x[4] = asin (-transformation_matrix (2,0));
  x[5] = atan2 (transformation_matrix (1,0), transformation_matrix (0,0));

  // Set temporary pointers
  tmp_src_ = &cloud_src;
  tmp_tgt_ = &cloud_tgt;
  tmp_idx_src_ = &indices_src;
  tmp_idx_tgt_ = &indices_tgt;

  // Optimize using BFGS
  const double gradient_tol = 1e-2;
  OptimizationFunctorWithIndices functor(this);
  BFGS<OptimizationFunctorWithIndices> bfgs (functor);
  bfgs.parameters.sigma = 0.01;
  bfgs.parameters.rho = 0.01;
  bfgs.parameters.tau1 = 9;
  bfgs.parameters.tau2 = 0.05;
  bfgs.parameters.tau3 = 0.5;
  bfgs.parameters.order = 3;

  int inner_iterations_ = 0;
  int result = bfgs.minimizeInit (x);
  result = BFGSSpace::Running;
  do
  {
    inner_iterations_++;
    result = bfgs.minimizeOneStep (x);
    if(result)
    {
      break;
    }
    result = bfgs.testGradient(gradient_tol);
  } while(result == BFGSSpace::Running && inner_iterations_ < max_inner_iterations_);
  if(result == BFGSSpace::NoProgress || result == BFGSSpace::Success || inner_iterations_ == max_inner_iterations_)
  {
    PCL_DEBUG ("[pcl::registration::TransformationEstimationBFGS::estimateRigidTransformation]");
    PCL_DEBUG ("BFGS solver finished with exit code %i \n", result);
    transformation_matrix.setIdentity();
    applyState(transformation_matrix, x);
  }
  else
    PCL_THROW_EXCEPTION(SolverDidntConvergeException, 
                        "[pcl::" << getClassName () << "::TransformationEstimationBFGS::estimateRigidTransformation] BFGS solver didn't converge!");
}
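estimateRigidTransformationBFGS is normally invoked indirectly, through the public registration API. A minimal usage sketch (assuming PCL 1.7+ naming and two already-filled pcl::PointCloud<pcl::PointXYZ>::Ptr clouds named src and tgt; the helper name alignWithGICP is hypothetical):

#include <iostream>
#include <pcl/point_types.h>
#include <pcl/registration/gicp.h>

// Hypothetical helper: align src to tgt with GICP and return the 4x4 transform.
Eigen::Matrix4f
alignWithGICP (const pcl::PointCloud<pcl::PointXYZ>::Ptr &src,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr &tgt)
{
  pcl::GeneralizedIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> gicp;
  gicp.setInputSource (src);
  gicp.setInputTarget (tgt);

  pcl::PointCloud<pcl::PointXYZ> aligned;
  gicp.align (aligned);  // runs computeTransformation, which drives the BFGS estimator above

  std::cout << "Converged: " << gicp.hasConverged ()
            << ", fitness: " << gicp.getFitnessScore () << std::endl;
  return gicp.getFinalTransformation ();
}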
Example #2
File: gicp.hpp Project: 2php/pcl
template <typename PointSource, typename PointTarget> inline void
pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
  pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
  using namespace std;
  // Difference between consecutive transforms
  double delta = 0;
  // Get the size of the target
  const size_t N = indices_->size ();
  // Set the mahalanobis matrices to identity
  mahalanobis_.resize (N, Eigen::Matrix3d::Identity ());
  // Compute target cloud covariance matrices
  if ((!target_covariances_) || (target_covariances_->empty ()))
  {
    target_covariances_.reset (new MatricesVector);  
    computeCovariances<PointTarget> (target_, tree_, *target_covariances_);
  }
  // Compute input cloud covariance matrices
  if ((!input_covariances_) || (input_covariances_->empty ()))
  {
    input_covariances_.reset (new MatricesVector);
    computeCovariances<PointSource> (input_, tree_reciprocal_, *input_covariances_);
  }

  base_transformation_ = guess;
  nr_iterations_ = 0;
  converged_ = false;
  double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
  std::vector<int> nn_indices (1);
  std::vector<float> nn_dists (1);

  while(!converged_)
  {
    size_t cnt = 0;
    std::vector<int> source_indices (indices_->size ());
    std::vector<int> target_indices (indices_->size ());

    // guess corresponds to base_t and transformation_ to t
    Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero ();
    for(size_t i = 0; i < 4; i++)
      for(size_t j = 0; j < 4; j++)
        for(size_t k = 0; k < 4; k++)
          transform_R(i,j)+= double(transformation_(i,k)) * double(guess(k,j));

    Eigen::Matrix3d R = transform_R.topLeftCorner<3,3> ();

    for (size_t i = 0; i < N; i++)
    {
      PointSource query = output[i];
      query.getVector4fMap () = guess * query.getVector4fMap ();
      query.getVector4fMap () = transformation_ * query.getVector4fMap ();

      if (!searchForNeighbors (query, nn_indices, nn_dists))
      {
        PCL_ERROR ("[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n", getClassName ().c_str (), (*indices_)[i]);
        return;
      }
      
      // Check if the distance to the nearest neighbor is smaller than the user imposed threshold
      if (nn_dists[0] < dist_threshold)
      {
        Eigen::Matrix3d &C1 = (*input_covariances_)[i];
        Eigen::Matrix3d &C2 = (*target_covariances_)[nn_indices[0]];
        Eigen::Matrix3d &M = mahalanobis_[i];
        // M = R*C1
        M = R * C1;
        // temp = M*R' + C2 = R*C1*R' + C2
        Eigen::Matrix3d temp = M * R.transpose();        
        temp+= C2;
        // M = temp^-1
        M = temp.inverse ();
        source_indices[cnt] = static_cast<int> (i);
        target_indices[cnt] = nn_indices[0];
        cnt++;
      }
    }
    // Resize to the actual number of valid correspondences
    source_indices.resize(cnt); target_indices.resize(cnt);
    /* optimize transformation using the current assignment and Mahalanobis metrics*/
    previous_transformation_ = transformation_;
    //optimization right here
    try
    {
      rigid_transformation_estimation_(output, source_indices, *target_, target_indices, transformation_);
      /* compute the delta from this iteration */
      delta = 0.;
      for(int k = 0; k < 4; k++) {
        for(int l = 0; l < 4; l++) {
          double ratio = 1;
          if(k < 3 && l < 3) // rotation part of the transform
            ratio = 1./rotation_epsilon_;
          else
            ratio = 1./transformation_epsilon_;
          double c_delta = ratio*fabs(previous_transformation_(k,l) - transformation_(k,l));
          if(c_delta > delta)
            delta = c_delta;
        }
      }
    } 
    catch (PCLException &e)
    {
      PCL_DEBUG ("[pcl::%s::computeTransformation] Optimization issue %s\n", getClassName ().c_str (), e.what ());
      break;
    }
    nr_iterations_++;
    // Check for convergence
    if (nr_iterations_ >= max_iterations_ || delta < 1)
    {
      converged_ = true;
      PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence reached. Number of iterations: %d out of %d. Transformation difference: %f\n",
                 getClassName ().c_str (), nr_iterations_, max_iterations_, (transformation_ - previous_transformation_).array ().abs ().sum ());
      previous_transformation_ = transformation_;
    } 
    else
      PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence failed\n", getClassName ().c_str ());
  }
  // for some reason the static equivalent method raises an error
  // final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) * (guess.block<3,3> (0,0));
  // final_transformation_.block <3, 1> (0, 3) = transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
  final_transformation_.topLeftCorner (3,3) = previous_transformation_.topLeftCorner (3,3) * guess.topLeftCorner (3,3);
  final_transformation_(0,3) = previous_transformation_(0,3) + guess(0,3);
  final_transformation_(1,3) = previous_transformation_(1,3) + guess(1,3);
  final_transformation_(2,3) = previous_transformation_(2,3) + guess(2,3);

  // Transform the point cloud
  pcl::transformPointCloud (*input_, output, final_transformation_);
}
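The delta computed above compares consecutive transforms entry-wise, scaling rotation entries by 1/rotation_epsilon_ and translation entries by 1/transformation_epsilon_, so delta < 1 means every entry changed by less than its epsilon. A tuning sketch for these knobs (values are illustrative only, not prescriptive):

pcl::GeneralizedIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> gicp;
gicp.setMaximumIterations (50);            // bound on the outer loop (max_iterations_)
gicp.setMaximumOptimizerIterations (20);   // bound on the inner BFGS loop (max_inner_iterations_)
gicp.setTransformationEpsilon (5e-4);      // translation tolerance in the delta test
gicp.setRotationEpsilon (2e-3);            // rotation tolerance in the delta test
gicp.setCorrespondenceRandomness (20);     // k nearest neighbors for the covariance estimates
gicp.setMaxCorrespondenceDistance (0.05);  // corr_dist_threshold_, squared inside the loop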
Example #3
template <typename PointSource, typename PointTarget, typename FeatureT> void 
pcl16::SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
  if (!input_features_)
  {
    PCL16_ERROR ("[pcl16::%s::computeTransformation] ", getClassName ().c_str ());
    PCL16_ERROR ("No source features were given! Call setSourceFeatures before aligning.\n");
    return;
  }
  if (!target_features_)
  {
    PCL16_ERROR ("[pcl16::%s::computeTransformation] ", getClassName ().c_str ());
    PCL16_ERROR ("No target features were given! Call setTargetFeatures before aligning.\n");
    return;
  }

  if (!error_functor_)
  {
    error_functor_.reset (new TruncatedError (static_cast<float> (corr_dist_threshold_)));
  }

  std::vector<int> sample_indices (nr_samples_);
  std::vector<int> corresponding_indices (nr_samples_);
  PointCloudSource input_transformed;
  float error, lowest_error (0);

  final_transformation_ = guess;
  int i_iter = 0;
  if (!guess.isApprox (Eigen::Matrix4f::Identity (), 0.01f))
  { // If the guess is not the identity matrix, evaluate its error first
    transformPointCloud (*input_, input_transformed, final_transformation_);
    lowest_error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));
    i_iter = 1;
  }

  for (; i_iter < max_iterations_; ++i_iter)
  {
    // Draw nr_samples_ random samples
    selectSamples (*input_, nr_samples_, min_sample_distance_, sample_indices);

    // Find corresponding features in the target cloud
    findSimilarFeatures (*input_features_, sample_indices, corresponding_indices);

    // Estimate the transform from the samples to their corresponding points
    transformation_estimation_->estimateRigidTransformation (*input_, sample_indices, *target_, corresponding_indices, transformation_);

    // Transform the data and compute the error
    transformPointCloud (*input_, input_transformed, transformation_);
    error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));

    // If the new error is lower, update the final transformation
    if (i_iter == 0 || error < lowest_error)
    {
      lowest_error = error;
      final_transformation_ = transformation_;
    }
  }

  // Apply the final transformation
  transformPointCloud (*input_, output, final_transformation_);
}
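The surrounding call sequence, sketched for FPFH features (pcl16 is a ROS-era renaming of the pcl namespace; the standard PCL 1.7+ names are used here, and src, tgt, src_features, tgt_features are assumed to be precomputed cloud and feature pointers):

#include <pcl/point_types.h>
#include <pcl/features/fpfh.h>
#include <pcl/registration/ia_ransac.h>

pcl::SampleConsensusInitialAlignment<pcl::PointXYZ, pcl::PointXYZ, pcl::FPFHSignature33> sac_ia;
sac_ia.setInputSource (src);
sac_ia.setSourceFeatures (src_features);
sac_ia.setInputTarget (tgt);
sac_ia.setTargetFeatures (tgt_features);
sac_ia.setMinSampleDistance (0.05f);
sac_ia.setMaxCorrespondenceDistance (0.1);
sac_ia.setMaximumIterations (500);

pcl::PointCloud<pcl::PointXYZ> pre_aligned;
sac_ia.align (pre_aligned);   // runs computeTransformation above with an identity guess
Eigen::Matrix4f initial_T = sac_ia.getFinalTransformation ();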
Example #4
/** \brief Align a pair of PointCloud datasets and return the result
  * \param cloud_src the source PointCloud
  * \param cloud_tgt the target PointCloud
  * \param output the resultant aligned source PointCloud
  * \param final_transform the resultant transform between source and target
  */
void pairAlign (const PointCloud::Ptr cloud_src, const PointCloud::Ptr cloud_tgt, PointCloud::Ptr output, Eigen::Matrix4f &final_transform, bool downsample = false)
{
  //
  // Downsample for consistency and speed
  // \note enable this for large datasets
  PointCloud::Ptr src (new PointCloud);
  PointCloud::Ptr tgt (new PointCloud);
  pcl::VoxelGrid<PointT> grid;
  if (downsample)
  {
    grid.setLeafSize (0.005, 0.005, 0.005);
    grid.setInputCloud (cloud_src);
    grid.filter (*src);

    grid.setInputCloud (cloud_tgt);
    grid.filter (*tgt);
  }
  else
  {
    src = cloud_src;
    tgt = cloud_tgt;
  }


  // Compute surface normals and curvature
  PointCloudWithNormals::Ptr points_with_normals_src (new PointCloudWithNormals);
  PointCloudWithNormals::Ptr points_with_normals_tgt (new PointCloudWithNormals);

  pcl::NormalEstimation<PointT, PointNormalT> norm_est;
  pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT> ());
  norm_est.setSearchMethod (tree);
  norm_est.setKSearch (30);
  
  norm_est.setInputCloud (src);
  norm_est.compute (*points_with_normals_src);
  pcl::copyPointCloud (*src, *points_with_normals_src);

  norm_est.setInputCloud (tgt);
  norm_est.compute (*points_with_normals_tgt);
  pcl::copyPointCloud (*tgt, *points_with_normals_tgt);

  //
  // Instantiate our custom point representation (defined above) ...
  MyPointRepresentation point_representation;
  // ... and weight the 'curvature' dimension so that it is balanced against x, y, and z
  float alpha[4] = {1.0, 1.0, 1.0, 1.0};
  point_representation.setRescaleValues (alpha);

  // Align
  pcl::IterativeClosestPointWithNormals<PointNormalT, PointNormalT> reg;  // alternative: IterativeClosestPointNonLinear
  reg.setTransformationEpsilon (1e-8);
  // Set the maximum distance between two correspondences (src<->tgt) to 10cm
  // Note: adjust this based on the size of your datasets
  reg.setMaxCorrespondenceDistance (0.1);
  reg.setUseReciprocalCorrespondences (true);
  reg.setRANSACOutlierRejectionThreshold (0.01);
  // Set the point representation
  reg.setPointRepresentation (boost::make_shared<const MyPointRepresentation> (point_representation));

  reg.setInputSource (points_with_normals_src);
  reg.setInputTarget (points_with_normals_tgt);



  //
  // Run the same optimization in a loop and visualize the results
  Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity (), prev = Eigen::Matrix4f::Identity (), targetToSource;
  PointCloudWithNormals::Ptr reg_result = points_with_normals_src;
  reg.setMaximumIterations (2);
  for (int i = 0; i < 100; ++i)
  {
    PCL_INFO ("Iteration Nr. %d.\n", i);

    // save cloud for visualization purpose
    points_with_normals_src = reg_result;

    // Estimate
    reg.setInputSource (points_with_normals_src);
    reg.align (*reg_result);

    // accumulate transformation between each iteration
    Ti = reg.getFinalTransformation () * Ti;

    // if the difference between this transformation and the previous one
    // is smaller than the threshold, refine the process by reducing
    // the maximal correspondence distance
    if (fabs ((reg.getLastIncrementalTransformation () - prev).sum ()) < reg.getTransformationEpsilon ())
      reg.setMaxCorrespondenceDistance (reg.getMaxCorrespondenceDistance () - 0.001);
    
    prev = reg.getLastIncrementalTransformation ();

    // visualize current state
    showCloudsRight(points_with_normals_tgt, points_with_normals_src);
  }

  //
  // Get the transformation from target to source
  targetToSource = Ti.inverse();

  //
  // Transform target back in source frame
  pcl::transformPointCloud (*cloud_tgt, *output, targetToSource);

  p->removePointCloud ("source");
  p->removePointCloud ("target");

  PointCloudColorHandlerCustom<PointT> cloud_tgt_h (output, 0, 255, 0);
  PointCloudColorHandlerCustom<PointT> cloud_src_h (cloud_src, 255, 0, 0);
  p->addPointCloud (output, cloud_tgt_h, "target", vp_2);
  p->addPointCloud (cloud_src, cloud_src_h, "source", vp_2);

	PCL_INFO ("Press q to continue the registration.\n");
  p->spin ();

  p->removePointCloud ("source"); 
  p->removePointCloud ("target");
  
  //add the source to the transformed target
  *output += *cloud_src;
  
  final_transform = targetToSource;
}
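For reference, this function comes from the pairwise incremental registration tutorial, where it is driven from main roughly like this (data, the list of {filename, cloud} entries loaded from the command line, and the visualizer globals p and vp_2 are assumed to be set up as in that tutorial):

  Eigen::Matrix4f GlobalTransform = Eigen::Matrix4f::Identity (), pairTransform;
  for (size_t i = 1; i < data.size (); ++i)
  {
    PointCloud::Ptr temp (new PointCloud);
    pairAlign (data[i - 1].cloud, data[i].cloud, temp, pairTransform, true);

    // transform the current pair into the global frame
    PointCloud::Ptr result (new PointCloud);
    pcl::transformPointCloud (*temp, *result, GlobalTransform);

    // accumulate the pairwise transform
    GlobalTransform = GlobalTransform * pairTransform;
  }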
Example #5
  void
  pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::recognize ()
  {

    models_.reset (new std::vector<ModelT>);
    transforms_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);

    PointInTPtr processed;
    typename pcl::PointCloud<FeatureT>::Ptr signatures (new pcl::PointCloud<FeatureT> ());
    //pcl::PointCloud<int> keypoints_input;
    PointInTPtr keypoints_pointcloud;

    if (signatures_ != 0 && processed_ != 0 && (signatures_->size () == keypoints_input_->points.size ()))
    {
      keypoints_pointcloud = keypoints_input_;
      signatures = signatures_;
      processed = processed_;
      std::cout << "Using the ISPK ..." << std::endl;
    }
    else
    {
      processed.reset (new pcl::PointCloud<PointInT>);
      if (indices_.size () > 0)
      {
        PointInTPtr sub_input (new pcl::PointCloud<PointInT>);
        pcl::copyPointCloud (*input_, indices_, *sub_input);
        estimator_->estimate (sub_input, processed, keypoints_pointcloud, signatures);
      }
      else
      {
        estimator_->estimate (input_, processed, keypoints_pointcloud, signatures);
      }

      processed_ = processed;

    }

    std::cout << "Number of keypoints:" << keypoints_pointcloud->points.size () << std::endl;

    int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);

    //feature matching and object hypotheses
    std::map<std::string, ObjectHypothesis> object_hypotheses;
    {
      for (size_t idx = 0; idx < signatures->points.size (); idx++)
      {
        float* hist = signatures->points[idx].histogram;
        std::vector<float> std_hist (hist, hist + size_feat);
        flann_model histogram;
        histogram.descr = std_hist;
        flann::Matrix<int> indices;
        flann::Matrix<float> distances;
        nearestKSearch (flann_index_, histogram, 1, indices, distances);

        //read view pose and keypoint coordinates, transform keypoint coordinates to model coordinates
        Eigen::Matrix4f homMatrixPose;
        getPose (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, homMatrixPose);

        typename pcl::PointCloud<PointInT>::Ptr keypoints (new pcl::PointCloud<PointInT> ());
        getKeypoints (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, keypoints);

        PointInT view_keypoint = keypoints->points[flann_models_.at (indices[0][0]).keypoint_id];
        PointInT model_keypoint;
        model_keypoint.getVector4fMap () = homMatrixPose.inverse () * view_keypoint.getVector4fMap ();

        typename std::map<std::string, ObjectHypothesis>::iterator it_map;
        if ((it_map = object_hypotheses.find (flann_models_.at (indices[0][0]).model.id_)) != object_hypotheses.end ())
        {
          //if the object hypothesis already exists, then add information
          ObjectHypothesis oh = (*it_map).second;
          oh.correspondences_pointcloud->points.push_back (model_keypoint);
          oh.correspondences_to_inputcloud->push_back (
                                                       pcl::Correspondence (static_cast<int> (oh.correspondences_pointcloud->points.size () - 1),
                                                                            static_cast<int> (idx), distances[0][0]));
          oh.feature_distances_->push_back (distances[0][0]);

        }
        else
        {
          //create object hypothesis
          ObjectHypothesis oh;

          typename pcl::PointCloud<PointInT>::Ptr correspondences_pointcloud (new pcl::PointCloud<PointInT> ());
          correspondences_pointcloud->points.push_back (model_keypoint);

          oh.model_ = flann_models_.at (indices[0][0]).model;
          oh.correspondences_pointcloud = correspondences_pointcloud;
          // the last keypoint added for this model corresponds to the current scene keypoint

          pcl::CorrespondencesPtr corr (new pcl::Correspondences ());
          oh.correspondences_to_inputcloud = corr;
          oh.correspondences_to_inputcloud->push_back (pcl::Correspondence (0, static_cast<int> (idx), distances[0][0]));

          boost::shared_ptr < std::vector<float> > feat_dist (new std::vector<float>);
          feat_dist->push_back (distances[0][0]);

          oh.feature_distances_ = feat_dist;
          object_hypotheses[oh.model_.id_] = oh;
        }
      }
    }

    typename std::map<std::string, ObjectHypothesis>::iterator it_map;

    std::vector<float> feature_distance_avg;

    {
      //pcl::ScopeTime t("Geometric verification, RANSAC and transform estimation");
      for (it_map = object_hypotheses.begin (); it_map != object_hypotheses.end (); it_map++)
      {
        std::vector < pcl::Correspondences > corresp_clusters;
        cg_algorithm_->setSceneCloud (keypoints_pointcloud);
        cg_algorithm_->setInputCloud ((*it_map).second.correspondences_pointcloud);
        cg_algorithm_->setModelSceneCorrespondences ((*it_map).second.correspondences_to_inputcloud);
        cg_algorithm_->cluster (corresp_clusters);

        std::cout << "Instances:" << corresp_clusters.size () << " Total correspondences:" << (*it_map).second.correspondences_to_inputcloud->size () << " " << it_map->first << std::endl;
        std::vector<bool> good_indices_for_hypothesis (corresp_clusters.size (), true);

        if (threshold_accept_model_hypothesis_ < 1.f)
        {
          //sort the hypotheses for each model according to their correspondences and take those that are threshold_accept_model_hypothesis_ over the max cardinality
          int max_cardinality = -1;
          for (size_t i = 0; i < corresp_clusters.size (); i++)
          {
            //std::cout <<  (corresp_clusters[i]).size() << " -- " << (*(*it_map).second.correspondences_to_inputcloud).size() << std::endl;
            if (max_cardinality < static_cast<int> (corresp_clusters[i].size ()))
            {
              max_cardinality = static_cast<int> (corresp_clusters[i].size ());
            }
          }

          for (size_t i = 0; i < corresp_clusters.size (); i++)
          {
            if (static_cast<float> ((corresp_clusters[i]).size ()) < (threshold_accept_model_hypothesis_ * static_cast<float> (max_cardinality)))
            {
              good_indices_for_hypothesis[i] = false;
            }
          }
        }

        for (size_t i = 0; i < corresp_clusters.size (); i++)
        {

          if (!good_indices_for_hypothesis[i])
            continue;

          //drawCorrespondences (processed, it_map->second, keypoints_pointcloud, corresp_clusters[i]);

          Eigen::Matrix4f best_trans;
          typename pcl::registration::TransformationEstimationSVD < PointInT, PointInT > t_est;
          t_est.estimateRigidTransformation (*(*it_map).second.correspondences_pointcloud, *keypoints_pointcloud, corresp_clusters[i], best_trans);

          models_->push_back ((*it_map).second.model_);
          transforms_->push_back (best_trans);

        }
      }
    }

    std::cout << "Number of hypotheses:" << models_->size() << std::endl;

    /**
     * POSE REFINEMENT
     **/

    if (ICP_iterations_ > 0)
    {
      pcl::ScopeTime ticp ("ICP ");

      //Prepare scene and model clouds for the pose refinement step
      PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());
      pcl::VoxelGrid<PointInT> voxel_grid_icp;
      voxel_grid_icp.setInputCloud (processed);
      voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
      voxel_grid_icp.filter (*cloud_voxelized_icp);
      source_->voxelizeAllModels (VOXEL_SIZE_ICP_);

#pragma omp parallel for schedule(dynamic,1) num_threads(omp_get_num_procs())
      for (int i = 0; i < static_cast<int>(models_->size ()); i++)
      {

        ConstPointInTPtr model_cloud;
        PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
        model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
        pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));

        typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT>::Ptr rej (
            new pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT> ());

        rej->setInputTarget (cloud_voxelized_icp);
        rej->setMaximumIterations (1000);
        rej->setInlierThreshold (0.005f);
        rej->setInputSource (model_aligned);

        pcl::IterativeClosestPoint<PointInT, PointInT> reg;
        reg.addCorrespondenceRejector (rej);
        reg.setInputTarget (cloud_voxelized_icp); //scene
        reg.setInputSource (model_aligned); //model
        reg.setMaximumIterations (ICP_iterations_);
        reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 4.f);

        typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
        reg.align (*output_);

        Eigen::Matrix4f icp_trans = reg.getFinalTransformation ();
        transforms_->at (i) = icp_trans * transforms_->at (i);
      }
    }

    /**
     * HYPOTHESES VERIFICATION
     **/

    if (hv_algorithm_)
    {

      pcl::ScopeTime thv ("HV verification");

      std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
      aligned_models.resize (models_->size ());
      for (size_t i = 0; i < models_->size (); i++)
      {
        ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.0025f);
        //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
        PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
        pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
        aligned_models[i] = model_aligned;
      }

      std::vector<bool> mask_hv;
      hv_algorithm_->setSceneCloud (input_);
      hv_algorithm_->addModels (aligned_models, true);
      hv_algorithm_->verify ();
      hv_algorithm_->getMask (mask_hv);

      boost::shared_ptr < std::vector<ModelT> > models_temp;
      boost::shared_ptr < std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > > transforms_temp;

      models_temp.reset (new std::vector<ModelT>);
      transforms_temp.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);

      for (size_t i = 0; i < models_->size (); i++)
      {
        if (!mask_hv[i])
          continue;

        models_temp->push_back (models_->at (i));
        transforms_temp->push_back (transforms_->at (i));
      }

      models_ = models_temp;
      transforms_ = transforms_temp;

    }
  }
Example #6
File: main.cpp Project: leonsenft/OpenGP
void set_uniform_matrix(GLuint programID, const char* NAME, const Eigen::Matrix4f& matrix) {
    GLint matrix_id = glGetUniformLocation(programID, NAME);
    glUniformMatrix4fv(matrix_id, 1, GL_FALSE, matrix.data());
}
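A usage sketch (programID is assumed to come from an already-linked shader program that declares uniform mat4 MVP). Note that glUniformMatrix4fv is called with GL_FALSE for the transpose flag, which expects column-major data and therefore matches Eigen's default storage order:

    Eigen::Matrix4f mvp = Eigen::Matrix4f::Identity();  // stand-in for projection * view * model
    glUseProgram(programID);
    set_uniform_matrix(programID, "MVP", mvp);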
Example #7
    Eigen::Matrix4f p2w(Eigen::Matrix4f::Identity());
    p2w.block<3,3>(0,0) = eigDx.transpose();
    p2w.block<3,1>(0,3) = -1.f * (p2w.block<3,3>(0,0) * centroid.head<3>());
    pcl::PointCloud<PointN> cPoints;
    pcl::transformPointCloud(*final, cPoints, p2w);
    PointN min_pt, max_pt;
    pcl::getMinMax3D(cPoints, min_pt, max_pt);
    const Eigen::Vector3f mean_diag = 0.5f*(max_pt.getVector3fMap() + min_pt.getVector3fMap());
    // final transform

    const Eigen::Quaternionf qfinal(eigDx);

    tfinal = eigDx*mean_diag + centroid.head<3>();
    std::cout << "tfinal: " << tfinal << std::endl;
    pcl::PointXYZRGB minp, maxp;
    Eigen::Matrix4f _tr = Eigen::Matrix4f::Identity();
    _tr.topLeftCorner<3,3>() = qfinal.toRotationMatrix();
    _tr.block<3,1>(0,3) = tfinal;


    _x = (max_pt.x-min_pt.x);// * 0.5;
    _y = (max_pt.y-min_pt.y);// * 0.5;
    _z = (max_pt.z-min_pt.z);// * 0.5;

    _tr = _tr.inverse().eval();
    qfinal_r = qfinal;


    pcl::PointIndices::Ptr object_indices (new pcl::PointIndices);
    for (size_t i = 0; i < final->size(); i++){
        PointN p = (*final)[i];
Example #8
void GenericIKWindow::solve()
{
    if (!kc || !ikSolver || !tcp)
    {
        return;
    }

    cout << "---- Solve IK ----" << endl;

    IKSolver::CartesianSelection s = IKSolver::All;

    if (UI.radioButton_Pos->isChecked())
    {
        s = IKSolver::Position;
    }

    //if (UI.radioButton_Ori->isChecked())
    //  s = IKSolver::Orientation;
    //ikSolver->setVerbose(true);
    Eigen::Matrix4f targetPose = box->getGlobalPose();

    /*
    if (kc && kc->getNode(kc->getSize() - 1)->isTranslationalJoint() && kc->getNode(kc->getSize() - 1)->getParent())
    {
        // setup gaze IK
        float v = (kc->getNode(kc->getSize() - 1)->getParent()->getGlobalPose().block(0, 3, 3, 1) - targetPose.block(0, 3, 3, 1)).norm();
        cout << "Setting initial value of translation joint to :" << v << endl;
        ikSolver->setupTranslationalJoint(kc->getNode(kc->getSize() - 1), v);
        kc->getNode(kc->getSize() - 1)->setJointValue(v);
    }*/
    clock_t startT = clock();

    if (ikGazeSolver)
    {
        ikGazeSolver->solve(targetPose.block(0, 3, 3, 1));
    }
    else
    {
        ikSolver->solve(targetPose, s, 50);
    }

    clock_t endT = clock();

    Eigen::Matrix4f actPose = tcp->getGlobalPose();
    float errorPos = (actPose.block(0, 3, 3, 1) - targetPose.block(0, 3, 3, 1)).norm();
    MathTools::Quaternion q1 = MathTools::eigen4f2quat(actPose);
    MathTools::Quaternion q2 = MathTools::eigen4f2quat(targetPose);
    MathTools::Quaternion d = getDelta(q1, q2);
    float errorOri = fabs(180.0f - (d.w + 1.0f) * 90.0f);

    float diffClock = (float)(((float)(endT - startT) / (float)CLOCKS_PER_SEC) * 1000.0f);
    QString qd = "Time: ";
    qd += QString::number(diffClock, 'f', 2);
    qd += " ms";
    UI.labelTime->setText(qd);
    QString qd2 = "Error Pos: ";
    qd2 += QString::number(errorPos, 'f', 2);
    qd2 += " mm";
    UI.labelPos->setText(qd2);
    QString qd3 = "Error Ori: ";
    qd3 += QString::number(errorOri, 'f', 2);
    qd3 += " deg";
    UI.labelOri->setText(qd3);

    cout << "Joint values:" << endl;
    std::vector<RobotNodePtr> nodes = kc->getAllRobotNodes();

    for (size_t i = 0; i < nodes.size(); i++)
    {
        cout << nodes[i]->getJointValue() << endl;
    }

    /*
    DifferentialIKPtr j(new DifferentialIK(kc));
    j->setGoal(targetPose,RobotNodePtr(),IKSolver::All);
    j->computeSteps(0.2f,0,50);
    */
    exViewer->render();

    cout << "---- END Solve IK ----" << endl;
}
Example #9
void IndexMap::synthesizeInfo(const Eigen::Matrix4f & pose,
                              const std::pair<GLuint, GLuint> & model,
                              const float depthCutoff,
                              const float confThreshold)
{
    glEnable(GL_PROGRAM_POINT_SIZE);
    glEnable(GL_POINT_SPRITE);

    infoFrameBuffer.Bind();

    glPushAttrib(GL_VIEWPORT_BIT);

    glViewport(0, 0, infoRenderBuffer.width, infoRenderBuffer.height);

    glClearColor(0, 0, 0, 0);

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    combinedProgram->Bind();

    Eigen::Matrix4f t_inv = pose.inverse();

    Eigen::Vector4f cam(Intrinsics::getInstance().cx(),
                  Intrinsics::getInstance().cy(),
                  Intrinsics::getInstance().fx(),
                  Intrinsics::getInstance().fy());

    combinedProgram->setUniform(Uniform("t_inv", t_inv));
    combinedProgram->setUniform(Uniform("cam", cam));
    combinedProgram->setUniform(Uniform("maxDepth", depthCutoff));
    combinedProgram->setUniform(Uniform("confThreshold", confThreshold));
    combinedProgram->setUniform(Uniform("cols", (float)Resolution::getInstance().cols()));
    combinedProgram->setUniform(Uniform("rows", (float)Resolution::getInstance().rows()));
    combinedProgram->setUniform(Uniform("time", 0));
    combinedProgram->setUniform(Uniform("maxTime", std::numeric_limits<int>::max()));
    combinedProgram->setUniform(Uniform("timeDelta", std::numeric_limits<int>::max()));

    glBindBuffer(GL_ARRAY_BUFFER, model.first);

    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, 0);

    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 1));

    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 2));

    glDrawTransformFeedback(GL_POINTS, model.second);

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    infoFrameBuffer.Unbind();

    combinedProgram->Unbind();

    glDisable(GL_PROGRAM_POINT_SIZE);
    glDisable(GL_POINT_SPRITE);

    glPopAttrib();

    glFinish();
}
Example #10
template <typename PointT> bool
pcl::SampleConsensusModelSphere<PointT>::computeModelCoefficients (
      const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
{
  // Need 4 samples
  if (samples.size () != 4)
  {
    PCL_ERROR ("[pcl::SampleConsensusModelSphere::computeModelCoefficients] Invalid set of samples given (%zu)!\n", samples.size ());
    return (false);
  }

  Eigen::Matrix4f temp;
  for (int i = 0; i < 4; i++)
  {
    temp (i, 0) = input_->points[samples[i]].x;
    temp (i, 1) = input_->points[samples[i]].y;
    temp (i, 2) = input_->points[samples[i]].z;
    temp (i, 3) = 1;
  }
  float m11 = temp.determinant ();
  if (m11 == 0)
    return (false);             // the points don't define a sphere!

  for (int i = 0; i < 4; ++i)
    temp (i, 0) = (input_->points[samples[i]].x) * (input_->points[samples[i]].x) +
                  (input_->points[samples[i]].y) * (input_->points[samples[i]].y) +
                  (input_->points[samples[i]].z) * (input_->points[samples[i]].z);
  float m12 = temp.determinant ();

  for (int i = 0; i < 4; ++i)
  {
    temp (i, 1) = temp (i, 0);
    temp (i, 0) = input_->points[samples[i]].x;
  }
  float m13 = temp.determinant ();

  for (int i = 0; i < 4; ++i)
  {
    temp (i, 2) = temp (i, 1);
    temp (i, 1) = input_->points[samples[i]].y;
  }
  float m14 = temp.determinant ();

  for (int i = 0; i < 4; ++i)
  {
    temp (i, 0) = temp (i, 2);
    temp (i, 1) = input_->points[samples[i]].x;
    temp (i, 2) = input_->points[samples[i]].y;
    temp (i, 3) = input_->points[samples[i]].z;
  }
  float m15 = temp.determinant ();

  // Center (x , y, z)
  model_coefficients.resize (4);
  model_coefficients[0] = 0.5f * m12 / m11;
  model_coefficients[1] = 0.5f * m13 / m11;
  model_coefficients[2] = 0.5f * m14 / m11;
  // Radius
  model_coefficients[3] = sqrtf (
                                 model_coefficients[0] * model_coefficients[0] +
                                 model_coefficients[1] * model_coefficients[1] +
                                 model_coefficients[2] * model_coefficients[2] - m15 / m11);

  return (true);
}
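computeModelCoefficients solves the circumsphere of the four samples through the determinant minors m11..m15 of the 4x4 system; it is normally invoked by a sample-consensus loop rather than called directly. A minimal RANSAC sketch around this model (cloud is an assumed, already-filled pcl::PointCloud<pcl::PointXYZ>::Ptr):

#include <pcl/point_types.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_sphere.h>

pcl::SampleConsensusModelSphere<pcl::PointXYZ>::Ptr
  model (new pcl::SampleConsensusModelSphere<pcl::PointXYZ> (cloud));
pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model);
ransac.setDistanceThreshold (0.01);
ransac.computeModel ();            // draws 4-point samples and calls computeModelCoefficients

Eigen::VectorXf coeffs;            // [center_x, center_y, center_z, radius]
ransac.getModelCoefficients (coeffs);
std::vector<int> inliers;
ransac.getInliers (inliers);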
Example #11
File: ICP.cpp Project: gilbs10/OpenFusion
void ICP::Iterate_ICP(const CameraData& d_newDepthData,
					  const PointInfo& d_renderedVertexMap,
					  float* d_currentTransform)	// This is input & output parameter on cuda
{
	// Initialize the incremental transform on the device to the identity matrix
	static const Eigen::Matrix4f m_identity4f = Eigen::Matrix4f::Identity();
	cutilSafeCall( cudaMemcpy(incrementalTransform, m_identity4f.data(), 16*sizeof(float), cudaMemcpyHostToDevice) );
	
	// d_currentTransform currently holds the previous transformation matrix. Initialize new transformation matrix to the previous
	cutilSafeCall( cudaMemcpy(newTransform, d_currentTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice) );

	int iterationCounter = 0;

	cutilSafeCall(cudaMemcpy(bestTransform, newTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice));

	// While loop - until we get minimum transformation matrix:
	do {
		iterationCounter++;

		// Copy current transform to its matching field.

		cutilSafeCall( cudaMemcpy(invLastTransform_host, newTransform, 16*sizeof(float), cudaMemcpyDeviceToHost) );
		//cutilSafeCall( cudaMemcpy(invLastTransformMatrix.data(), newTransform, 16*sizeof(float), cudaMemcpyDeviceToHost) );
		cudaDeviceSynchronize();
		// Calculate inverse of current transformation matrix and store in invLastTransformMatrix.
		Eigen::Map<Eigen::Matrix<float, 4, 4, Eigen::RowMajor> > invLastTransformMatrix(invLastTransform_host);	// TODO: COLMAJOR IS ALSO WORKING WELL. CHECK OUT. MAYBE BUGGY
		invLastTransformMatrix = invLastTransformMatrix.inverse();
		
		// Copy current inverse transformation matrix into cuda.
		cutilSafeCall( cudaMemcpy(invLastTransform, invLastTransform_host, 16*sizeof(float), cudaMemcpyHostToDevice) );
		cudaDeviceSynchronize();

		// Find corresponding points and build A and b matrices.
		FindCorresponding (d_newDepthData, d_renderedVertexMap);

		//////calculating Xopt = (At * A)^(-1) * At * b  using tree reduction//////

		cutilSafeCall(cudaDeviceSynchronize());
		ClearHostMatrices();

		cutilSafeCall(cudaMemcpy(AtA_host, AtA, NUM_VARS*NUM_VARS*m_numCorrespondenceBlocks*sizeof(float), cudaMemcpyDeviceToHost));
		cutilSafeCall(cudaMemcpy(Atb_host, Atb, NUM_VARS*m_numCorrespondenceBlocks*sizeof(float), cudaMemcpyDeviceToHost));
		
		for (int i = 0; i < m_numCorrespondenceBlocks ; ++i) {
			for (int j = 0; j < NUM_VARS*NUM_VARS; ++j) {
				AtA_sum[j] += AtA_host[i * NUM_VARS*NUM_VARS + j];
			}
			for (int j = 0; j < NUM_VARS; ++j) {
				Atb_sum[j] += Atb_host[i * NUM_VARS + j];
			}
		}
		for (int i = 0; i < NUM_VARS; ++i) {
			for (int j = 0; j < i; ++j) {
				AtA_sum[i * NUM_VARS + j] = AtA_sum[j * NUM_VARS + i];
			}
		}

		Eigen::Matrix<float, NUM_VARS, NUM_VARS, Eigen::RowMajor> AtA_eigen;
		Eigen::Matrix<float, NUM_VARS, 1> Atb_eigen;

		for (int i = 0; i < NUM_VARS; ++i) {
			Atb_eigen(i) = Atb_sum[i];
			for (int j = 0; j < NUM_VARS; ++j) {
				AtA_eigen(i, j) = AtA_sum[i * NUM_VARS + j];
			}
		}

		float det = AtA_eigen.determinant();
		if (std::isnan(det) || det == 0.f || std::fabs(det) < _EPSILON_) {
			// TODO - PROBLEM! MATRIX IS SINGULAR. HANDLE
			cutilSafeCall( cudaMemcpy(newTransform, d_currentTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice) );
			cout << "       No transform found." << endl;
			break;
		}

		Eigen::Matrix<float, NUM_VARS, 1> parameters = AtA_eigen.llt().solve(Atb_eigen).cast<float>();
		
		for (int i = 0; i < 3; ++i) {
			xOpt_host[i] = -parameters(i);		// TODO - CONSIDER WRITE -parameters(i) (minus) like in KinectShape
		}

		for (int i = 3; i < 6; ++i) {	// Angles are negated, translations are positive
			xOpt_host[i] = parameters(i);
		}

		cutilSafeCall(cudaMemcpy(xOpt, xOpt_host, NUM_VARS*sizeof(float), cudaMemcpyHostToDevice));

		BuildNewTransformMatrix();

		CameraHandler::updateCameraData(m_gridSize, m_blockSize, d_newDepthData, NULL, newTransform, NULL, NULL);
		cutilSafeCall(cudaDeviceSynchronize());

	} while (iterationCounter < m_maxIterations);
		
	cutilSafeCall(cudaMemcpy(d_currentTransform, newTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice));
	
	// The output is in @Param d_currentTransform
}
Example #12
void pairAlign (const PointCloud::Ptr cloud_src, const PointCloud::Ptr cloud_tgt, Eigen::Matrix4f &final_transform)
{

    PointCloud::Ptr src (new PointCloud);
    PointCloud::Ptr tgt (new PointCloud);

    *src = *cloud_src;
    *tgt = *cloud_tgt;

    // Compute surface normals and curvature
    PointCloudWithNormals::Ptr points_with_normals_src (new PointCloudWithNormals);
    PointCloudWithNormals::Ptr points_with_normals_tgt (new PointCloudWithNormals);

    pcl::NormalEstimation<PointT, PointNormalT> norm_est;
    pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT> ());
    norm_est.setSearchMethod (tree);
    norm_est.setKSearch (30);

    norm_est.setInputCloud (src);
    norm_est.compute (*points_with_normals_src);
    pcl::copyPointCloud (*src, *points_with_normals_src);

    norm_est.setInputCloud (tgt);
    norm_est.compute (*points_with_normals_tgt);
    pcl::copyPointCloud (*tgt, *points_with_normals_tgt);

    // Instantiate our custom point representation (defined above) ...
    MyPointRepresentation point_representation;
    // ... and weight the 'curvature' dimension so that it is balanced against x, y, and z
    float alpha[4] = {1.0, 1.0, 1.0, 1.0};
    point_representation.setRescaleValues (alpha);

    //
    // Align
    pcl::IterativeClosestPointNonLinear<PointNormalT, PointNormalT> reg;
    reg.setTransformationEpsilon (1e-3);
    // Set the maximum distance between two correspondences (src<->tgt) to 10cm
    // Note: adjust this based on the size of your datasets
    reg.setMaxCorrespondenceDistance (0.5);
    // Set the point representation
    reg.setPointRepresentation (boost::make_shared<const MyPointRepresentation> (point_representation));

    reg.setInputSource (points_with_normals_src);
    reg.setInputTarget (points_with_normals_tgt);



    //
    // Run the same optimization in a loop and visualize the results
    Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity (), prev = Eigen::Matrix4f::Identity (), targetToSource;
    PointCloudWithNormals::Ptr reg_result = points_with_normals_src;
    reg.setMaximumIterations (2);

    //originally iterates up to 30
    for (int i = 0; i < 10; ++i)
    {

        // save cloud for visualization purpose
        points_with_normals_src = reg_result;

        // Estimate
        reg.setInputSource (points_with_normals_src);
        reg.align (*reg_result);

        // accumulate transformation between each iteration
        Ti = reg.getFinalTransformation () * Ti;

        // if the difference between this transformation and the previous one
        // is smaller than the threshold, refine the process by reducing
        // the maximal correspondence distance
        if (fabs ((reg.getLastIncrementalTransformation () - prev).sum ()) < reg.getTransformationEpsilon ())
        {
            reg.setMaxCorrespondenceDistance (reg.getMaxCorrespondenceDistance () - 0.001);
        }
        prev = reg.getLastIncrementalTransformation ();

    }

    //
    // Get the transformation from target to source
    targetToSource = Ti.inverse();

    final_transform = targetToSource;

}
Example #13
/* ---[ */
int
main (int argc, char** argv)
{
  print_info ("Transform a cloud. For more information, use: %s -h\n", argv[0]);

  bool help = false;
  parse_argument (argc, argv, "-h", help);
  if (argc < 3 || help)
  {
    printHelp (argc, argv);
    return (-1);
  }

  // Parse the command line arguments for .pcd files
  std::vector<int> p_file_indices;
  p_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
  if (p_file_indices.size () != 2)
  {
    print_error ("Need one input PCD file and one output PCD file to continue.\n");
    return (-1);
  }

  // Initialize the transformation matrix
  Eigen::Matrix4f tform; 
  tform.setIdentity ();

  // Command line parsing
  float dx, dy, dz;
  std::vector<float> values;

  if (parse_3x_arguments (argc, argv, "-trans", dx, dy, dz) > -1)
  {
    tform (0, 3) = dx;
    tform (1, 3) = dy;
    tform (2, 3) = dz;
  }

  if (parse_x_arguments (argc, argv, "-quat", values) > -1)
  {
    if (values.size () == 4)
    {
      const float& x = values[0];
      const float& y = values[1];
      const float& z = values[2];
      const float& w = values[3];
      tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::Quaternionf (w, x, y, z));
    }
    else
    {
      print_error ("Wrong number of values given (%zu): ", values.size ());
      print_error ("The quaternion specified with -quat must contain 4 elements (w,x,y,z).\n");
    }
  }

  if (parse_x_arguments (argc, argv, "-axisangle", values) > -1)
  {
    if (values.size () == 4)
    {
      const float& ax = values[0];
      const float& ay = values[1];
      const float& az = values[2];
      const float& theta = values[3];
      tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::AngleAxisf (theta, Eigen::Vector3f (ax, ay, az)));
    }
    else
    {
      print_error ("Wrong number of values given (%zu): ", values.size ());
      print_error ("The rotation specified with -axisangle must contain 4 elements (ax,ay,az,theta).\n");
    }
  }

  if (parse_x_arguments (argc, argv, "-matrix", values) > -1)
  {
    if (values.size () == 9 || values.size () == 16)
    {
      int n = values.size () == 9 ? 3 : 4;
      for (int r = 0; r < n; ++r)
        for (int c = 0; c < n; ++c)
          tform (r, c) = values[n*r+c];
    }
    else
    {
      print_error ("Wrong number of values given (%zu): ", values.size ());
      print_error ("The transformation specified with -matrix must be 3x3 (9) or 4x4 (16).\n");
    }
  }

  // Load the first file
  sensor_msgs::PointCloud2::Ptr cloud (new sensor_msgs::PointCloud2);
  if (!loadCloud (argv[p_file_indices[0]], *cloud)) 
    return (-1);

  // Apply the transform
  sensor_msgs::PointCloud2 output;
  compute (cloud, output, tform);

  // Check if a scaling parameter has been given
  double divider[3];
  if (parse_3x_arguments (argc, argv, "-scale", divider[0], divider[1], divider[2]) > -1)
  {
    print_highlight ("Scaling XYZ data with the following values: %f, %f, %f\n", divider[0], divider[1], divider[2]);
    scaleInPlace (output, divider);
  }

  // Save into the second file
  saveCloud (argv[p_file_indices[1]], output);
  return (0);
}
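Typical invocations (the binary name depends on how the PCL tools are built on your system; the flag formats follow the parsers above): transform_point_cloud input.pcd output.pcd -trans 1.0,2.0,0.5 to translate, or transform_point_cloud input.pcd output.pcd -axisangle 0,0,1,0.7854 to rotate 45 degrees about the Z axis.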
Example #14
int volumetric_knt_cuda(int argc, char **argv)
{
	Timer timer;
	int vol_size = vx_count * vx_size;
	float half_vol_size = vol_size * 0.5f;

	Eigen::Vector3i voxel_size(vx_size, vx_size, vx_size);
	Eigen::Vector3i volume_size(vol_size, vol_size, vol_size);
	Eigen::Vector3i voxel_count(vx_count, vx_count, vx_count);
	int total_voxels = voxel_count.x() * voxel_count.y() * voxel_count.z();


	std::cout << std::fixed
		<< "Voxel Count  : " << voxel_count.transpose() << std::endl
		<< "Voxel Size   : " << voxel_size.transpose() << std::endl
		<< "Volume Size  : " << volume_size.transpose() << std::endl
		<< "Total Voxels : " << total_voxels << std::endl
		<< std::endl;

	timer.start();
	KinectFrame knt(filepath);
	timer.print_interval("Importing knt frame : ");

	Eigen::Affine3f grid_affine = Eigen::Affine3f::Identity();
	grid_affine.translate(Eigen::Vector3f(0, 0, half_vol_size));
	grid_affine.scale(Eigen::Vector3f(1, 1, 1));	// z is negative inside of screen
	Eigen::Matrix4f grid_matrix = grid_affine.matrix();

	float knt_near_plane = 0.1f;
	float knt_far_plane = 10240.0f;
	Eigen::Matrix4f projection = perspective_matrix<float>(KINECT_V2_FOVY, KINECT_V2_DEPTH_ASPECT_RATIO, knt_near_plane, knt_far_plane);
	Eigen::Matrix4f projection_inverse = projection.inverse();
	Eigen::Matrix4f view_matrix = Eigen::Matrix4f::Identity();

	std::vector<float4> vertices(knt.depth.size(), make_float4(0, 0, 0, 1));
	std::vector<float4> normals(knt.depth.size(), make_float4(0, 0, 1, 1));
	std::vector<Eigen::Vector2f> grid_voxels_params(total_voxels);

	// 
	// setup image parameters
	//
	unsigned short image_width = KINECT_V2_DEPTH_WIDTH;
	unsigned short image_height = image_width / aspect_ratio;
	QImage img(image_width, image_height, QImage::Format::Format_RGBA8888);
	img.fill(Qt::GlobalColor::gray);
	uchar4* image_data = (uchar4*)img.bits();
	//float4* debug_buffer = new float4[image_width * image_height];
	//memset(debug_buffer, 0, image_width * image_height * sizeof(float4));

	knt_cuda_setup(
		vx_count, vx_size,
		grid_matrix.data(),
		projection.data(),
		projection_inverse.data(),
		*grid_voxels_params.data()->data(),
		KINECT_V2_DEPTH_WIDTH,
		KINECT_V2_DEPTH_HEIGHT,
		KINECT_V2_DEPTH_MIN,
		KINECT_V2_DEPTH_MAX,
		vertices.data()[0],
		normals.data()[0],
		image_width,
		image_height
		);

	timer.start();
	knt_cuda_allocate();
	knt_cuda_init_grid();
	timer.print_interval("Allocating gpu      : ");

	timer.start();
	knt_cuda_copy_host_to_device();
	knt_cuda_copy_depth_buffer_to_device(knt.depth.data());
	timer.print_interval("Copy host to device : ");

	timer.start();
	knt_cuda_normal_estimation();
	timer.print_interval("Normal estimation   : ");

	timer.start();
	knt_cuda_update_grid(view_matrix.data());
	timer.print_interval("Update grid         : ");

	timer.start();
	knt_cuda_grid_params_copy_device_to_host();
	knt_cuda_copy_device_to_host();
	timer.print_interval("Copy device to host : ");
	//
	// setup camera parameters
	//
	timer.start();
	Eigen::Affine3f camera_to_world = Eigen::Affine3f::Identity();
	float cam_z = -half_vol_size;
	camera_to_world.scale(Eigen::Vector3f(1, 1, -1));
	camera_to_world.translate(Eigen::Vector3f(half_vol_size, half_vol_size, cam_z));

	
	Eigen::Matrix4f camera_to_world_matrix = camera_to_world.matrix();
		
	knt_cuda_raycast(KINECT_V2_FOVY, KINECT_V2_DEPTH_ASPECT_RATIO, camera_to_world_matrix.data());
	timer.print_interval("Raycast             : ");

	timer.start();
	knt_cuda_copy_image_device_to_host(*(uchar4*)img.bits());
	timer.print_interval("Copy Img to host    : ");
	
	timer.start();
	knt_cuda_free();
	timer.print_interval("Cleanup gpu         : ");

#if 0
	//memset(image_data, 0, image_width * image_height * sizeof(uchar4));
	//memset(debug_buffer, 0, image_width * image_height * sizeof(float4));

	Eigen::Vector3f camera_pos = camera_to_world_matrix.col(3).head<3>();
	float fov_scale = (float)tan(DegToRad(KINECT_V2_FOVY * 0.5f));
	float aspect_ratio = KINECT_V2_DEPTH_ASPECT_RATIO;


	//
	// for each pixel, trace a ray
	//
	timer.start();
	for (int y = 0; y < image_height; ++y)
	{
		for (int x = 0; x < image_width; ++x)
		{
			// Convert from image space (in pixels) to screen space
			// Screen Space along X axis = [-aspect ratio, aspect ratio] 
			// Screen Space along Y axis = [-1, 1]
			float x_norm = (2.f * float(x) + 0.5f) / (float)image_width;
			float y_norm = (2.f * float(y) + 0.5f) / (float)image_height;
			Eigen::Vector3f screen_coord(
				(x_norm - 1.f) * aspect_ratio * fov_scale,
				(1.f - y_norm) * fov_scale,
				1.0f);

			Eigen::Vector3f direction;
			multDirMatrix(screen_coord, camera_to_world_matrix, direction);
			direction.normalize();

			long voxels_zero_crossing[2] = { -1, -1 };

			int hit_count = raycast_tsdf_volume<float>(
				camera_pos,
				direction,
				voxel_count.cast<int>(),
				voxel_size.cast<int>(),
				grid_voxels_params,
				voxels_zero_crossing);

			if (hit_count > 0)
			{
				if (hit_count == 2)
				{
					float4 n = normals[y * image_width + x];

					//image_data[y * image_width + x].x = 0;
					//image_data[y * image_width + x].y = 128;
					//image_data[y * image_width + x].z = 128;
					//image_data[y * image_width + x].w = 255;
					
					image_data[y * image_width + x].x = uchar((n.x * 0.5f + 0.5f) * 255);
					image_data[y * image_width + x].y = uchar((n.y * 0.5f + 0.5f) * 255);
					image_data[y * image_width + x].z = uchar((n.z * 0.5f + 0.5f) * 255);
					image_data[y * image_width + x].w = 255;
				}
				else
				{
					image_data[y * image_width + x].x = 128;
					image_data[y * image_width + x].y = 128;
					image_data[y * image_width + x].z = 0;
					image_data[y * image_width + x].w = 255;
				}
			}
			else
			{
				image_data[y * image_width + x].x = 128;
				image_data[y * image_width + x].y = 0;
				image_data[y * image_width + x].z = 0;
				image_data[y * image_width + x].w = 255;
			}
		}
	}
	timer.print_interval("Raycasting to image     : ");
	//export_debug_buffer("../../data/cpu_image_data_screen_coord_f4.txt", debug_buffer, image_width, image_height);
	//export_image_buffer("../../data/cpu_image_data_screen_coord_uc.txt", image_data, image_width, image_height);
#else

	//export_debug_buffer("../../data/gpu_image_data_screen_coord_f4.txt", debug_buffer, image_width, image_height);
	//export_image_buffer("../../data/gpu_image_data_screen_coord_uc.txt", image_data, image_width, image_height);
#endif

	QImage image(&image_data[0].x, image_width, image_height, QImage::Format_RGBA8888);
	//image.fill(Qt::GlobalColor::black);
	QApplication app(argc, argv);
	QImageWidget widget;
	widget.resize(KINECT_V2_DEPTH_WIDTH, KINECT_V2_DEPTH_HEIGHT);
	widget.setImage(image);
	widget.show();

	return app.exec();
}
Example #15
void ComputePnP(const char* folder_name,int start_idx,int total_num,int jump_num){//Compute two-view PnP
	int img_idx = start_idx;
	int img_num = total_num;
	int img_num2 = jump_num;

	char imgname[64];
	char pcdname[64];
	char imgname2[64];
	char pcdname2[64];


	ofstream out_pose,out_pose2;
	out_pose.open("RT.txt");
	out_pose2.open("invRT.txt");
	cv::Mat colorImage_1(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_8UC4);
	cv::Mat pointCloud_XYZforRGB_1(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_32FC3,cv::Scalar::all(0));

	cv::Mat colorImage_2(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_8UC4);
	cv::Mat pointCloud_XYZforRGB_2(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_32FC3,cv::Scalar::all(0));

	cv::Mat img1,img2;
  /////////////////////PCL objects//////////////////////////

  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2 (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1f (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2f (new pcl::PointCloud<pcl::PointXYZRGBA>);

  
	pcl::visualization::PCLVisualizer vislm; 

////////////////////Find matched points between two RGB images////////////////////////
	ifstream cam_in("CamPara.txt");
	cam_in>>CamIntrinsic[0][0]>>CamIntrinsic[0][1]>>CamIntrinsic[0][2]
	>>CamIntrinsic[1][0]>>CamIntrinsic[1][1]>>CamIntrinsic[1][2]
	>>CamIntrinsic[2][0]>>CamIntrinsic[2][1]>>CamIntrinsic[2][2];

	cam_in.close();

	const cv::Mat Camera_Matrix(3,3,CV_64F,CamIntrinsic);
	const cv::Mat disCoef(1,5,CV_64F,DisCoef);

	cv::Mat img_Matches;

	int numKeyPoints = 400; 
	int numKeyPoints2 = 400; 

	RobustMatcher rMatcher; 
 

	cv::Ptr<cv::FeatureDetector> detector = new cv::FastFeatureDetector(0); 
	cv::Ptr<cv::FeatureDetector> detector2 = new cv::FastFeatureDetector(0); 
	cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor(); 
	cv::Ptr<cv::DescriptorMatcher> Matcher= new cv::BFMatcher(cv::NORM_HAMMING);
	rMatcher.setFeatureDetector(detector); 
	rMatcher.setDescriptorExtractor(extractor); 
	rMatcher.setDescriptorMatcher(Matcher); 

	std::vector<cv::KeyPoint> img1_keypoints; 
	cv::Mat img1_descriptors; 
	std::vector<cv::KeyPoint> img2_keypoints; 
	cv::Mat img2_descriptors; 

	std::vector<cv::DMatch > Matches; 

 //////////////////////PCL rigid motion estimation///////////////////////////
	Eigen::Matrix4f TotaltransforMation = Eigen::Matrix4f::Identity(); 
	Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity();

	pcl::PointCloud<pcl::PointXYZRGBA>::Ptr Transcloud_2 (new pcl::PointCloud<pcl::PointXYZRGBA>);

//////////////////////////////////////////////////////////////////////////////

	printf("From %d to %d",img_idx,img_idx+(img_num-1)*img_num2);

	//Camera position
	pcl::PointCloud<pcl::PointXYZ> Camera_pose0;
	pcl::PointCloud<pcl::PointXYZ> Camera_pose;

	Camera_pose0.push_back(pcl::PointXYZ(0,0,0));
	Camera_pose0.push_back(pcl::PointXYZ(0.2,0,0));
	Camera_pose0.push_back(pcl::PointXYZ(0,0.2,0));
	Camera_pose0.push_back(pcl::PointXYZ(0,0,0.2));


	/////////////Looping///////////////////////////
	int d = 0;
	for(int i=0; i<img_num-1; ++i, d+=img_num2){

		sprintf(imgname,"%s/rgb_%d.jpg",folder_name,img_idx+d);

    	sprintf(pcdname,"%s/pcd_%d",folder_name,img_idx+d);

		sprintf(imgname2,"%s/rgb_%d.jpg",folder_name,img_idx+d+img_num2);

    	sprintf(pcdname2,"%s/pcd_%d",folder_name,img_idx+d+img_num2);

		printf("comparing %s & %s\n",imgname,imgname2);

		 ////////////Reading data/////////////////////
		 img1 = cv::imread(imgname,CV_LOAD_IMAGE_GRAYSCALE);
		 colorImage_1 = cv::imread(imgname,CV_LOAD_IMAGE_COLOR);
 
		 printf("reading\n");
		 FILE* fin = fopen(pcdname,"rb");
		 fread(pointCloud_XYZforRGB_1.data,1,pointCloud_XYZforRGB_1.step*pointCloud_XYZforRGB_1.rows,fin);
		 fclose(fin);


		 img2 = cv::imread(imgname2,CV_LOAD_IMAGE_GRAYSCALE);
		 colorImage_2 = cv::imread(imgname2,CV_LOAD_IMAGE_COLOR);


		 printf("reading\n");
		 fin = fopen(pcdname2,"rb");
		 fread(pointCloud_XYZforRGB_2.data,1,pointCloud_XYZforRGB_2.step*pointCloud_XYZforRGB_2.rows,fin);
		 fclose(fin);

		 cloud_1->clear();
		 cloud_2->clear();
		 Mat2PCL_XYZRGB_ALL(colorImage_1,pointCloud_XYZforRGB_1,*cloud_1);
		 Mat2PCL_XYZRGB_ALL(colorImage_2,pointCloud_XYZforRGB_2,*cloud_2);

        ///////////////////Finding 2D features//////////////////////////

		img1_keypoints.clear();
		img2_keypoints.clear();

		detector->detect(img1,img1_keypoints); 
		extractor->compute(img1,img1_keypoints,img1_descriptors); 
	
		detector2->detect(img2,img2_keypoints); 
		extractor->compute(img2,img2_keypoints,img2_descriptors); 

		Matches.clear();

		printf("cv::Matching\n");

		rMatcher.match(Matches, img1_keypoints, img2_keypoints,img1_descriptors,img2_descriptors); 

		//printf("drawing\n");
		drawMatches(img1, img1_keypoints, img2, img2_keypoints,Matches, img_Matches, cv::Scalar::all(-1), cv::Scalar::all(-1),vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
		cv::imshow("Good matches",img_Matches);
		cv::waitKey(10);
		///////////////////Find corresponding 3D points////////////////////////////////////////
		//We should draw the point cloud from the matches...
		//////////////////3D geometry///////////////////////////////////////////////////////

		 ///////////////////////// 
		 //Compute PnP
		 //////////////////////// 
		cv::Mat rvec(1,3,cv::DataType<double>::type);
		cv::Mat tvec(1,3,cv::DataType<double>::type);
		cv::Mat rotationMatrix(3,3,cv::DataType<double>::type);

		vector<cv::Point2f> p2ds;
		vector<cv::Point3f> p3ds;
		 
		Mat2PCL_XYZRGB_MATCH_PnP(pointCloud_XYZforRGB_1,p3ds,p2ds,img1_keypoints,img2_keypoints,Matches);
		printf("3D/2D points:%d,%d\n",p3ds.size(),p2ds.size());

		if(p3ds.size() > 5){
			cv::solvePnPRansac(p3ds,p2ds,Camera_Matrix,disCoef,rvec,tvec);
			cv::Rodrigues(rvec,rotationMatrix);
			PnPret2Mat4f(rotationMatrix,tvec,Ti);
			Ti = Ti.inverse();


		}
		///////////////////////Compose the translation, and transform the point cloud////////////////

		Transcloud_2->clear();


		std::cout<<"\nLocal motion from PnP is \n"<<Ti;
		TotaltransforMation = TotaltransforMation*Ti;
		pcl::transformPointCloud(*cloud_2,*Transcloud_2,TotaltransforMation);

		std::cout<<"\nTotal motion from PnP is \n"<<TotaltransforMation<<endl; 

		//Add camera coordinate axes
		pcl::transformPointCloud(Camera_pose0,Camera_pose,TotaltransforMation);
		sprintf(pcdname2,"X%d",img_idx+d+img_num2);
		vislm.addLine(Camera_pose[0],Camera_pose[1],255,0,0,pcdname2);
		sprintf(pcdname2,"Y%d",img_idx+d+img_num2);
		vislm.addLine(Camera_pose[0],Camera_pose[2],0,255,0,pcdname2);
		sprintf(pcdname2,"Z%d",img_idx+d+img_num2);
		vislm.addLine(Camera_pose[0],Camera_pose[3],0,0,255,pcdname2);

		if(i==0){
			vislm.addPointCloud(cloud_1->makeShared(),pcdname);
			//PCLviewer.showCloud(cloud_1,"init");
			out_pose<<Eigen::Matrix4f::Identity()<<endl;
		}

		vislm.addPointCloud(Transcloud_2->makeShared(),pcdname2);
		out_pose<<TotaltransforMation<<endl;
		out_pose2<<TotaltransforMation.inverse()<<endl;
	}
	vislm.resetCamera(); 
	vislm.spin();

	out_pose.close();
	out_pose2.close();
}