Example #1
File: kinfu.cpp Project: BITVoyager/pcl
bool
pcl::gpu::KinfuTracker::operator() (const DepthMap& depth_raw, 
    Eigen::Affine3f *hint)
{  
  device::Intr intr (fx_, fy_, cx_, cy_);

  if (!disable_icp_)
  {
      {
        //ScopeTime time(">>> Bilateral, pyr-down-all, create-maps-all");
        //depth_raw.copyTo(depths_curr[0]);
        device::bilateralFilter (depth_raw, depths_curr_[0]);

        if (max_icp_distance_ > 0)
          device::truncateDepth(depths_curr_[0], max_icp_distance_);

        for (int i = 1; i < LEVELS; ++i)
          device::pyrDown (depths_curr_[i-1], depths_curr_[i]);

        for (int i = 0; i < LEVELS; ++i)
        {
          device::createVMap (intr(i), depths_curr_[i], vmaps_curr_[i]);
          //device::createNMap(vmaps_curr_[i], nmaps_curr_[i]);
          computeNormalsEigen (vmaps_curr_[i], nmaps_curr_[i]);
        }
        pcl::device::sync ();
      }

      //can't perform more on first frame
      if (global_time_ == 0)
      {
        Matrix3frm init_Rcam = rmats_[0]; //  [Ri|ti] - pos of camera, i.e.
        Vector3f   init_tcam = tvecs_[0]; //  transform from camera to global coo space for (i-1)th camera pose

        Mat33&  device_Rcam = device_cast<Mat33> (init_Rcam);
        float3& device_tcam = device_cast<float3>(init_tcam);

        Matrix3frm init_Rcam_inv = init_Rcam.inverse ();
        Mat33&   device_Rcam_inv = device_cast<Mat33> (init_Rcam_inv);
        float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize());

        //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tranc_dist, volume_);    
        device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_);

        for (int i = 0; i < LEVELS; ++i)
          device::tranformMaps (vmaps_curr_[i], nmaps_curr_[i], device_Rcam, device_tcam, vmaps_g_prev_[i], nmaps_g_prev_[i]);

        ++global_time_;
        return (false);
      }

      ///////////////////////////////////////////////////////////////////////////////////////////
      // Iterative Closest Point
      Matrix3frm Rprev = rmats_[global_time_ - 1]; //  [Ri|ti] - pos of camera, i.e.
      Vector3f   tprev = tvecs_[global_time_ - 1]; //  transform from camera to global coo space for (i-1)th camera pose
      Matrix3frm Rprev_inv = Rprev.inverse (); //Rprev.t();

      //Mat33&  device_Rprev     = device_cast<Mat33> (Rprev);
      Mat33&  device_Rprev_inv = device_cast<Mat33> (Rprev_inv);
      float3& device_tprev     = device_cast<float3> (tprev);
      Matrix3frm Rcurr;
      Vector3f tcurr;
      if(hint)
      {
        Rcurr = hint->rotation().matrix();
        tcurr = hint->translation().matrix();
      }
      else
      {
        Rcurr = Rprev; // transform to global coo for ith camera pose
        tcurr = tprev;
      }
      {
        //ScopeTime time("icp-all");
        for (int level_index = LEVELS-1; level_index>=0; --level_index)
        {
          int iter_num = icp_iterations_[level_index];

          MapArr& vmap_curr = vmaps_curr_[level_index];
          MapArr& nmap_curr = nmaps_curr_[level_index];

          //MapArr& vmap_g_curr = vmaps_g_curr_[level_index];
          //MapArr& nmap_g_curr = nmaps_g_curr_[level_index];

          MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
          MapArr& nmap_g_prev = nmaps_g_prev_[level_index];

          //CorespMap& coresp = coresps_[level_index];

          for (int iter = 0; iter < iter_num; ++iter)
          {
            Mat33&  device_Rcurr = device_cast<Mat33> (Rcurr);
            float3& device_tcurr = device_cast<float3>(tcurr);

            Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
            Eigen::Matrix<double, 6, 1> b;
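            // estimateCombined (below) builds the normal equations A * x = b of
            // the linearized point-to-plane ICP cost (see Kok-Lim Low, "Linear
            // Least-Squares Optimization for Point-to-Plane ICP Surface
            // Registration"): each correspondence (p, q, n) contributes the
            // residual n . (R*p + t - q); for small angles R ~ I + [alpha, beta,
            // gamma]_x, giving the unknowns x = (alpha, beta, gamma, tx, ty, tz).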
    #if 0
            device::tranformMaps(vmap_curr, nmap_curr, device_Rcurr, device_tcurr, vmap_g_curr, nmap_g_curr);
            findCoresp(vmap_g_curr, nmap_g_curr, device_Rprev_inv, device_tprev, intr(level_index), vmap_g_prev, nmap_g_prev, distThres_, angleThres_, coresp);
            device::estimateTransform(vmap_g_prev, nmap_g_prev, vmap_g_curr, coresp, gbuf_, sumbuf_, A.data(), b.data());

            //cv::gpu::GpuMat ma(coresp.rows(), coresp.cols(), CV_32S, coresp.ptr(), coresp.step());
            //cv::Mat cpu;
            //ma.download(cpu);
            //cv::imshow(names[level_index] + string(" --- coresp white == -1"), cpu == -1);
    #else
            estimateCombined (device_Rcurr, device_tcurr, vmap_curr, nmap_curr, device_Rprev_inv, device_tprev, intr (level_index),
                              vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());
    #endif
            //checking nullspace
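            // a near-singular A means the correspondences do not constrain all
            // six degrees of freedom (e.g. the camera only sees a single plane),
            // so the solve below would be unreliable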
            double det = A.determinant ();

            if (fabs (det) < 1e-15 || pcl_isnan (det))
            {
              if (pcl_isnan (det)) cout << "qnan" << endl;

              reset ();
              return (false);
            }
            //float maxc = A.maxCoeff();

            Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
            //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

            float alpha = result (0);
            float beta  = result (1);
            float gamma = result (2);

            Eigen::Matrix3f Rinc = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
            Vector3f tinc = result.tail<3> ();

            //compose
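            // left-multiplying applies the increment in global coordinates;
            // after all pyramid levels, (Rcurr, tcurr) maps camera to global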
            tcurr = Rinc * tcurr + tinc;
            Rcurr = Rinc * Rcurr;
          }
        }
      }
      //save transform
      rmats_.push_back (Rcurr);
      tvecs_.push_back (tcurr);
  } 
  else /* if (disable_icp_) */
  {
      if (global_time_ == 0)
        ++global_time_;

      Matrix3frm Rcurr = rmats_[global_time_ - 1];
      Vector3f   tcurr = tvecs_[global_time_ - 1];

      rmats_.push_back (Rcurr);
      tvecs_.push_back (tcurr);

  }

  Matrix3frm Rprev = rmats_[global_time_ - 1];
  Vector3f   tprev = tvecs_[global_time_ - 1];

  Matrix3frm Rcurr = rmats_.back();
  Vector3f   tcurr = tvecs_.back();

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Integration check - We do not integrate volume if camera does not move.  
  float rnorm = rodrigues2(Rcurr.inverse() * Rprev).norm();
  float tnorm = (tcurr - tprev).norm();  
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_;
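  // rodrigues2 returns the axis-angle vector of the relative rotation, so
  // rnorm is the rotation angle in radians; the metric averages it with the
  // translation distance before comparing against the threshold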

  if (disable_icp_)
    integrate = true;

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Volume integration
  float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());

  Matrix3frm Rcurr_inv = Rcurr.inverse ();
  Mat33&  device_Rcurr_inv = device_cast<Mat33> (Rcurr_inv);
  float3& device_tcurr = device_cast<float3> (tcurr);
  if (integrate)
  {
    //ScopeTime time("tsdf");
    //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tranc_dist, volume_);
    integrateTsdfVolume (depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_);
  }

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Ray casting
  Mat33& device_Rcurr = device_cast<Mat33> (Rcurr);
  {
    //ScopeTime time("ray-cast-all");
    raycast (intr, device_Rcurr, device_tcurr, tsdf_volume_->getTsdfTruncDist(), device_volume_size, tsdf_volume_->data(), vmaps_g_prev_[0], nmaps_g_prev_[0]);
    for (int i = 1; i < LEVELS; ++i)
    {
      resizeVMap (vmaps_g_prev_[i-1], vmaps_g_prev_[i]);
      resizeNMap (nmaps_g_prev_[i-1], nmaps_g_prev_[i]);
    }
    pcl::device::sync ();
  }

  ++global_time_;
  return (true);
}
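
A minimal driver sketch for the tracker above. It assumes the standard pcl::gpu::KinfuTracker interface (constructor taking rows/cols, setDepthIntrinsics, getCameraPose, and the frame operator shown above); grab_depth is a hypothetical frame source, not part of PCL.

// Sketch only: grab_depth and the intrinsic values are illustrative assumptions.
#include <vector>
#include <pcl/gpu/kinfu/kinfu.h>

bool grab_depth (std::vector<unsigned short>& frame);  // hypothetical 640x480, 16-bit mm source

void track_loop ()
{
  const int rows = 480, cols = 640;
  pcl::gpu::KinfuTracker kinfu (rows, cols);
  kinfu.setDepthIntrinsics (525.f, 525.f, 319.5f, 239.5f);  // typical Kinect values

  std::vector<unsigned short> host_depth (rows * cols);
  pcl::gpu::KinfuTracker::DepthMap depth_device;

  while (grab_depth (host_depth))
  {
    // upload the host depth image to the GPU (row pitch given in bytes)
    depth_device.upload (&host_depth[0], cols * sizeof (unsigned short), rows, cols);

    // returns false on the first frame and whenever tracking was reset
    if (kinfu (depth_device))
    {
      Eigen::Affine3f pose = kinfu.getCameraPose ();  // latest camera-to-global pose
      (void) pose;  // e.g. hand off to a renderer
    }
  }
}
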
Example #2
File: kinfu.cpp Project: neeljp/pcl
inline bool 
pcl::gpu::kinfuLS::KinfuTracker::performPairWiseICP(const Intr cam_intrinsics, Matrix3frm& resulting_rotation , Vector3f& resulting_translation)
{ 
  // we assume that both v and n maps are in the same coordinate space
  // initialize rotation and translation to respectively identity and zero
  Matrix3frm previous_rotation = Eigen::Matrix3f::Identity ();
  Matrix3frm previous_rotation_inv = previous_rotation.inverse ();
  Vector3f previous_translation = Vector3f(0,0,0);
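  // pairwise ICP aligns two consecutive frames directly, so the "previous"
  // pose is the identity by construction and the estimate is a relative motion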
  
 ///////////////////////////////////////////////
  // Convert pose to device type
  Mat33  device_cam_rot_prev_inv; 
  float3 device_cam_trans_prev;
  convertTransforms(previous_rotation_inv, previous_translation, device_cam_rot_prev_inv, device_cam_trans_prev);  
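  // convertTransforms packs the Eigen rotation/translation into the raw
  // device-side Mat33/float3 types consumed by the CUDA kernels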
 
  // Initialize output pose to current pose (i.e. identity and zero translation)
  Matrix3frm current_rotation = previous_rotation;
  Vector3f current_translation = previous_translation;
   
  ///////////////////////////////////////////////
  // Run ICP
  {
    //ScopeTime time("icp-all");
    for (int level_index = LEVELS-1; level_index>=0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];
      
      // current vertex and normal maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];   
      
      // previous vertex and normal maps
      MapArr& vmap_prev = vmaps_prev_[level_index];
      MapArr& nmap_prev = nmaps_prev_[level_index];

      // no need to transform maps from global to local since they are both in camera coordinates
      
      // run ICP for iter_num iterations (return false when lost)
      for (int iter = 0; iter < iter_num; ++iter)
      {
        //CONVERT POSES TO DEVICE TYPES
        // CURRENT LOCAL POSE
        Mat33  device_current_rotation = device_cast<Mat33> (current_rotation);      
        float3 device_current_translation_local = device_cast<float3> (current_translation);
                
        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;

        // call the ICP function (see paper by Kok-Lim Low "Linear Least-squares Optimization for Point-to-Plane ICP Surface Registration")
        estimateCombined (device_current_rotation, device_current_translation_local, vmap_curr, nmap_curr, device_cam_rot_prev_inv, device_cam_trans_prev, cam_intrinsics (level_index), 
                          vmap_prev, nmap_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());

        // checking nullspace 
        double det = A.determinant ();
        
        if ( fabs (det) < 1e-15 || pcl_isnan (det) )
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;
                    
          PCL_WARN ("ICP PairWise LOST...\n");
          //reset ();
          return (false);
        }

        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
        float alpha = result (0);
        float beta  = result (1);
        float gamma = result (2);

        // deduce incremental rotation and translation from ICP's results
        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose global pose
        current_translation = cam_rot_incremental * current_translation + cam_trans_incremental;
        current_rotation = cam_rot_incremental * current_rotation;
      }
    }
  }
  
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // since raw depth maps are quite noisy, we make sure the estimated transform is big enough to be taken into account
  float rnorm = rodrigues2(current_rotation).norm();
  float tnorm = (current_translation).norm();    
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_ * 2.0f;
  
  if(integrate)
  {
    resulting_rotation = current_rotation;
    resulting_translation = current_translation;
  }
  else
  {
    resulting_rotation = Eigen::Matrix3f::Identity ();
    resulting_translation = Vector3f(0,0,0);
  }
  // ICP has converged
  return (true);
}
Example #3
File: kinfu.cpp Project: cgrad/pcl
bool
pcl::gpu::KinfuTracker::operator() (const DepthMap& depth_raw)
{

    device::Intr intr (fx_, fy_, cx_, cy_);
    {
        //ScopeTime time(">>> Bilateral, pyr-down-all, create-maps-all");
        //depth_raw.copyTo(depths_curr[0]);
        device::bilateralFilter (depth_raw, depths_curr_[0]);

        if (max_icp_distance_ > 0)
            device::truncateDepth(depths_curr_[0], max_icp_distance_);

        for (int i = 1; i < LEVELS; ++i)
            device::pyrDown (depths_curr_[i-1], depths_curr_[i]);

        for (int i = 0; i < LEVELS; ++i)
        {
            device::createVMap (intr(i), depths_curr_[i], vmaps_curr_[i]);
            //device::createNMap(vmaps_curr_[i], nmaps_curr_[i]);
            computeNormalsEigen (vmaps_curr_[i], nmaps_curr_[i]);
        }
        pcl::device::sync ();
    }

    //can't perform more on first frame
    if (global_time_ == 0)
    {

        Matrix3frm initial_cam_rot = rmats_[0]; //  [Ri|ti] - pos of camera, i.e.
        Matrix3frm initial_cam_rot_inv = initial_cam_rot.inverse ();
        Vector3f   initial_cam_trans = tvecs_[0]; //  transform from camera to global coo space for (i-1)th camera pose

        Mat33&  device_initial_cam_rot = device_cast<Mat33> (initial_cam_rot);
        Mat33&  device_initial_cam_rot_inv = device_cast<Mat33> (initial_cam_rot_inv);
        float3& device_initial_cam_trans = device_cast<float3>(initial_cam_trans);

        float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize());

        device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_initial_cam_rot_inv, device_initial_cam_trans, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), getCyclicalBufferStructure (), depthRawScaled_);

        /*
        Matrix3frm init_Rcam = rmats_[0]; //  [Ri|ti] - pos of camera, i.e.
        Vector3f   init_tcam = tvecs_[0]; //  transform from camera to global coo space for (i-1)th camera pose

        Mat33&  device_Rcam = device_cast<Mat33> (init_Rcam);
        float3& device_tcam = device_cast<float3>(init_tcam);

        Matrix3frm init_Rcam_inv = init_Rcam.inverse ();
        Mat33&   device_Rcam_inv = device_cast<Mat33> (init_Rcam_inv);
        float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize ());

        //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tranc_dist, volume_);
        device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tsdf_volume_->getTsdfTruncDist (), tsdf_volume_->data (), getCyclicalBufferStructure (), depthRawScaled_);
        */

        for (int i = 0; i < LEVELS; ++i)
            device::tranformMaps (vmaps_curr_[i], nmaps_curr_[i], device_initial_cam_rot, device_initial_cam_trans, vmaps_g_prev_[i], nmaps_g_prev_[i]);


        if(perform_last_scan_)
            finished_ = true;


        ++global_time_;
        return (false);
    }

    ///////////////////////////////////////////////////////////////////////////////////////////
    // Iterative Closest Point


    // GET PREVIOUS GLOBAL TRANSFORM
    // Previous global rotation
    Matrix3frm cam_rot_global_prev = rmats_[global_time_ - 1];            // [Ri|ti] - pos of camera, i.e.
    // Previous global translation
    Vector3f   cam_trans_global_prev = tvecs_[global_time_ - 1];          // transform from camera to global coo space for (i-1)th camera pose
    // Previous global inverse rotation
    Matrix3frm cam_rot_global_prev_inv = cam_rot_global_prev.inverse ();  // Rprev.t();

    // GET CURRENT GLOBAL TRANSFORM
    Matrix3frm cam_rot_global_curr = cam_rot_global_prev;                 // transform to global coo for ith camera pose
    Vector3f   cam_trans_global_curr = cam_trans_global_prev;

    // CONVERT TO DEVICE TYPES
    //LOCAL PREVIOUS TRANSFORM
    Mat33&  device_cam_rot_local_prev_inv = device_cast<Mat33> (cam_rot_global_prev_inv);

    float3& device_cam_trans_local_prev_tmp = device_cast<float3> (cam_trans_global_prev);
    float3 device_cam_trans_local_prev;
    device_cam_trans_local_prev.x = device_cam_trans_local_prev_tmp.x - (getCyclicalBufferStructure ())->origin_metric.x;
    device_cam_trans_local_prev.y = device_cam_trans_local_prev_tmp.y - (getCyclicalBufferStructure ())->origin_metric.y;
    device_cam_trans_local_prev.z = device_cam_trans_local_prev_tmp.z - (getCyclicalBufferStructure ())->origin_metric.z;
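    // subtracting the cyclical buffer's metric origin converts the global
    // camera translation into the local frame of the shifting TSDF cube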

    /*

    Matrix3frm Rprev = rmats_[global_time_ - 1]; //  [Ri|ti] - pos of camera, i.e.
    Vector3f   tprev = tvecs_[global_time_ - 1]; //  transform from camera to global coo space for (i-1)th camera pose
    Matrix3frm Rprev_inv = Rprev.inverse (); //Rprev.t();

    //Mat33&  device_Rprev     = device_cast<Mat33> (Rprev);
    Mat33&  device_Rprev_inv = device_cast<Mat33> (Rprev_inv);
    float3& device_tprev     = device_cast<float3> (tprev);

    Matrix3frm Rcurr = Rprev; // transform to global coo for ith camera pose
    Vector3f   tcurr = tprev;
    */
    {
        //ScopeTime time("icp-all");
        for (int level_index = LEVELS-1; level_index>=0; --level_index)
        {
            int iter_num = icp_iterations_[level_index];

            // current maps
            MapArr& vmap_curr = vmaps_curr_[level_index];
            MapArr& nmap_curr = nmaps_curr_[level_index];

            // previous maps
            MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
            MapArr& nmap_g_prev = nmaps_g_prev_[level_index];

            // We need to transform the maps from global to the local coordinates
            Mat33&  rotation_id = device_cast<Mat33> (rmats_[0]); // Identity Rotation Matrix. Because we only need translation
            float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
            cube_origin.x = -cube_origin.x;
            cube_origin.y = -cube_origin.y;
            cube_origin.z = -cube_origin.z;

            MapArr& vmap_temp = vmap_g_prev;
            MapArr& nmap_temp = nmap_g_prev;
            device::tranformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmap_g_prev, nmap_g_prev);
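            // with an identity rotation and a translation of -origin_metric,
            // this shifts the previous maps from global into cube-local
            // coordinates in place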

            /*
            MapArr& vmap_curr = vmaps_curr_[level_index];
            MapArr& nmap_curr = nmaps_curr_[level_index];

            //MapArr& vmap_g_curr = vmaps_g_curr_[level_index];
            //MapArr& nmap_g_curr = nmaps_g_curr_[level_index];

            MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
            MapArr& nmap_g_prev = nmaps_g_prev_[level_index];
            */
            //CorespMap& coresp = coresps_[level_index];

            for (int iter = 0; iter < iter_num; ++iter)
            {
                /*
                Mat33&  device_Rcurr = device_cast<Mat33> (Rcurr);
                float3& device_tcurr = device_cast<float3>(tcurr);
                */
                //CONVERT TO DEVICE TYPES
                // CURRENT LOCAL TRANSFORM
                Mat33&  device_cam_rot_local_curr = device_cast<Mat33> (cam_rot_global_curr);/// We have not dealt with changes in rotations

                float3& device_cam_trans_local_curr_tmp = device_cast<float3> (cam_trans_global_curr);
                float3 device_cam_trans_local_curr;
                device_cam_trans_local_curr.x = device_cam_trans_local_curr_tmp.x - (getCyclicalBufferStructure ())->origin_metric.x;
                device_cam_trans_local_curr.y = device_cam_trans_local_curr_tmp.y - (getCyclicalBufferStructure ())->origin_metric.y;
                device_cam_trans_local_curr.z = device_cam_trans_local_curr_tmp.z - (getCyclicalBufferStructure ())->origin_metric.z;

                Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
                Eigen::Matrix<double, 6, 1> b;

                estimateCombined (device_cam_rot_local_curr, device_cam_trans_local_curr, vmap_curr, nmap_curr, device_cam_rot_local_prev_inv, device_cam_trans_local_prev, intr (level_index),
                                  vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());
                /*
                        estimateCombined (device_Rcurr, device_tcurr, vmap_curr, nmap_curr, device_Rprev_inv, device_tprev, intr (level_index),
                                          vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());
                */
                //checking nullspace
                double det = A.determinant ();

                if ( fabs (det) < 1e-15 || pcl_isnan (det) )
                {
                    if (pcl_isnan (det)) cout << "qnan" << endl;

                    PCL_ERROR ("LOST...\n");
                    reset ();
                    return (false);
                }
                //float maxc = A.maxCoeff();

                Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
                //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

                float alpha = result (0);
                float beta  = result (1);
                float gamma = result (2);

                Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
                Vector3f cam_trans_incremental = result.tail<3> ();

                //compose
                cam_trans_global_curr = cam_rot_incremental * cam_trans_global_curr + cam_trans_incremental;
                cam_rot_global_curr = cam_rot_incremental * cam_rot_global_curr;
                /*
                tcurr = Rinc * tcurr + tinc;
                Rcurr = Rinc * Rcurr;
                */
            }
        }
    }
    //save transform
    rmats_.push_back (cam_rot_global_curr);
    tvecs_.push_back (cam_trans_global_curr);
    /*
    rmats_.push_back (Rcurr);
    tvecs_.push_back (tcurr);
    */

    //check for shift
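    // when the new pose gets close to the border of the current TSDF cube,
    // the cyclical buffer shifts the volume and updates origin_metric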
    bool has_shifted = cyclical_.checkForShift(tsdf_volume_, getCameraPose (), 0.6 * volume_size_, true, perform_last_scan_);

    if(has_shifted)
        PCL_WARN ("SHIFTING\n");

    // get NEW local rotation
    Matrix3frm cam_rot_local_curr_inv = cam_rot_global_curr.inverse ();
    Mat33&  device_cam_rot_local_curr_inv = device_cast<Mat33> (cam_rot_local_curr_inv);
    Mat33&  device_cam_rot_local_curr = device_cast<Mat33> (cam_rot_global_curr);

    // get NEW local translation
    float3& device_cam_trans_local_curr_tmp = device_cast<float3> (cam_trans_global_curr);
    float3 device_cam_trans_local_curr;
    device_cam_trans_local_curr.x = device_cam_trans_local_curr_tmp.x - (getCyclicalBufferStructure ())->origin_metric.x;
    device_cam_trans_local_curr.y = device_cam_trans_local_curr_tmp.y - (getCyclicalBufferStructure ())->origin_metric.y;
    device_cam_trans_local_curr.z = device_cam_trans_local_curr_tmp.z - (getCyclicalBufferStructure ())->origin_metric.z;


    ///////////////////////////////////////////////////////////////////////////////////////////
    // Integration check - We do not integrate volume if camera does not move.
    float rnorm = rodrigues2(cam_rot_global_curr.inverse() * cam_rot_global_prev).norm();
    float tnorm = (cam_trans_global_curr - cam_trans_global_prev).norm();
    const float alpha = 1.f;
    bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_;
    //~ if(integrate)
    //~ std::cout << "\tCamera movement since previous frame was " << (rnorm + alpha * tnorm)/2 << " integrate is set to " << integrate << std::endl;
    //~ else
    //~ std::cout << "Camera movement since previous frame was " << (rnorm + alpha * tnorm)/2 << " integrate is set to " << integrate << std::endl;

    ///////////////////////////////////////////////////////////////////////////////////////////
    // Volume integration
    float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());
    /*
      Matrix3frm Rcurr_inv = Rcurr.inverse ();
      Mat33&  device_Rcurr_inv = device_cast<Mat33> (Rcurr_inv);
      float3& device_tcurr = device_cast<float3> (tcurr);*/
    if (integrate)
    {
        //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tranc_dist, volume_);
        integrateTsdfVolume (depth_raw, intr, device_volume_size, device_cam_rot_local_curr_inv, device_cam_trans_local_curr, tsdf_volume_->getTsdfTruncDist (), tsdf_volume_->data (), getCyclicalBufferStructure (), depthRawScaled_);
    }

    ///////////////////////////////////////////////////////////////////////////////////////////
    // Ray casting
    /*Mat33& device_Rcurr = device_cast<Mat33> (Rcurr);*/
    {
        raycast (intr, device_cam_rot_local_curr, device_cam_trans_local_curr, tsdf_volume_->getTsdfTruncDist (), device_volume_size, tsdf_volume_->data (), getCyclicalBufferStructure (), vmaps_g_prev_[0], nmaps_g_prev_[0]);

        // POST-PROCESSING: We need to transform the newly raycasted maps into the global space.
        Mat33&  rotation_id = device_cast<Mat33> (rmats_[0]); /// Identity Rotation Matrix. Because we only need translation
        float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
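        // cube_origin is deliberately not negated here: adding it maps the
        // cube-local raycast output back into global coordinates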

        //~ PCL_INFO ("Raycasting with cube origin at %f, %f, %f\n", cube_origin.x, cube_origin.y, cube_origin.z);

        MapArr& vmap_temp = vmaps_g_prev_[0];
        MapArr& nmap_temp = nmaps_g_prev_[0];

        device::tranformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmaps_g_prev_[0], nmaps_g_prev_[0]);

        for (int i = 1; i < LEVELS; ++i)
        {
            resizeVMap (vmaps_g_prev_[i-1], vmaps_g_prev_[i]);
            resizeNMap (nmaps_g_prev_[i-1], nmaps_g_prev_[i]);
        }
        pcl::device::sync ();
    }

    if(has_shifted && perform_last_scan_)
        extractAndMeshWorld ();

    ++global_time_;
    return (true);
}
Example #4
File: kinfu.cpp Project: neeljp/pcl
inline bool 
pcl::gpu::kinfuLS::KinfuTracker::performICP(const Intr& cam_intrinsics, Matrix3frm& previous_global_rotation, Vector3f& previous_global_translation, Matrix3frm& current_global_rotation , Vector3f& current_global_translation)
{
  
  if(disable_icp_)
  {
    lost_=false;
    return (true);
  }
  
  // Compute inverse rotation
  Matrix3frm previous_global_rotation_inv = previous_global_rotation.inverse ();  // Rprev.t();  
 
 ///////////////////////////////////////////////
  // Convert pose to device type
  Mat33  device_cam_rot_local_prev_inv; 
  float3 device_cam_trans_local_prev;
  convertTransforms(previous_global_rotation_inv, previous_global_translation, device_cam_rot_local_prev_inv, device_cam_trans_local_prev);  
  device_cam_trans_local_prev -= getCyclicalBufferStructure ()->origin_metric;
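  // shift the previous pose into the local frame of the cyclical TSDF cube
  // (the same correction done per component by hand in Example #3)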
 
  // Use temporary pose, so that we modify the current global pose only if ICP converged
  Matrix3frm resulting_rotation;
  Vector3f resulting_translation;
  
  // Initialize output pose to current pose
  current_global_rotation = previous_global_rotation;
  current_global_translation = previous_global_translation;
 
  ///////////////////////////////////////////////
  // Run ICP
  {
    //ScopeTime time("icp-all");
    for (int level_index = LEVELS-1; level_index>=0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];
      
      // current vertex and normal maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];   
      
      // previous vertex and normal maps
      MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
      MapArr& nmap_g_prev = nmaps_g_prev_[level_index];
      
      // We need to transform the maps from global to local coordinates
      Mat33&  rotation_id = device_cast<Mat33> (rmats_[0]); // Identity Rotation Matrix. Because we only need translation
      float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
      cube_origin = -cube_origin;
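      // negate so the transform below shifts the previous maps from global
      // into cube-local coordinates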
      
      MapArr& vmap_temp = vmap_g_prev;
      MapArr& nmap_temp = nmap_g_prev;
      transformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmap_g_prev, nmap_g_prev); 
      
      // run ICP for iter_num iterations (return false when lost)
      for (int iter = 0; iter < iter_num; ++iter)
      {
        //CONVERT POSES TO DEVICE TYPES
        // CURRENT LOCAL POSE
        Mat33    device_current_rotation = device_cast<Mat33> (current_global_rotation); // We do not deal with changes in rotations        
        float3 device_current_translation_local = device_cast<float3> (current_global_translation);
        device_current_translation_local -= getCyclicalBufferStructure ()->origin_metric; 
                
        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;

        // call the ICP function (see paper by Kok-Lim Low "Linear Least-squares Optimization for Point-to-Plane ICP Surface Registration")
        estimateCombined (device_current_rotation, device_current_translation_local, vmap_curr, nmap_curr, device_cam_rot_local_prev_inv, device_cam_trans_local_prev, cam_intrinsics (level_index), 
                          vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());

        // checking nullspace 
        double det = A.determinant ();
    
        if ( fabs (det) < 1e-15 /*100000 */ || pcl_isnan (det) ) //TODO find a threshold that makes ICP track well, but prevents it from generating wrong transforms
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;
          if(lost_ == false)
            PCL_ERROR ("ICP LOST... PLEASE COME BACK TO THE LAST VALID POSE (green)\n");
          //reset (); //GUI will now show the user that ICP is lost. User needs to press "R" to reset the volume
          lost_ = true;
          return (false);
        }

        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
        float alpha = result (0);
        float beta  = result (1);
        float gamma = result (2);

        // deduce incremental rotation and translation from ICP's results
        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose global pose
        current_global_translation = cam_rot_incremental * current_global_translation + cam_trans_incremental;
        current_global_rotation = cam_rot_incremental * current_global_rotation;
      }
    }
  }
  // ICP has converged
  lost_ = false;
  return (true);
}
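
For context, a sketch of how a caller inside the kinfuLS tracker's per-frame operator() might consume performICP; the control flow is paraphrased and abbreviated, and only names visible in the snippet above are assumed.

// Hypothetical fragment from the tracker's operator() (not verbatim kinfuLS code);
// intr is the per-frame camera intrinsics.
Matrix3frm last_known_global_rotation = rmats_[global_time_ - 1];
Vector3f   last_known_global_translation = tvecs_[global_time_ - 1];

Matrix3frm current_global_rotation;
Vector3f   current_global_translation;

if (!performICP (intr, last_known_global_rotation, last_known_global_translation,
                 current_global_rotation, current_global_translation))
{
  // tracking failed (lost_ is now true): keep the last valid pose and skip
  // TSDF integration for this frame
  ++global_time_;
  return (false);
}

// tracking succeeded: record the new pose and continue with integration
rmats_.push_back (current_global_rotation);
tvecs_.push_back (current_global_translation);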