// Returns the inverse of the 3x3 matrix A.
// The matrix is converted to its Eigen representation, inverted there, and
// converted back, so all numerics are delegated to Eigen's fixed-size inverse.
matrix_3f inverse ( const matrix_3f & A )
{
  const Matrix3frm eigen_form = matrix_3f_to_Eigen ( A );
  return Eigen_to_matrix_3f ( eigen_form.inverse() );
}
// Applies the rigid transform [Rcam | tcam] to every pyramid level of this
// cloud's vertex/normal maps, writing the results into vmapDst/nmapDst.
// When `inverse` is true the rotation is inverted before use and the flag is
// also forwarded to the device kernel, which interprets it accordingly.
void MyPointCloud::transformPointCloud(Matrix3frm Rcam, Vector3f tcam, std::vector<device::MapArr> &vmapDst, std::vector<device::MapArr> &nmapDst, bool inverse)
{
  if (inverse)
  {
    // Invert through an explicit temporary to avoid any in-place aliasing.
    const Matrix3frm inverted = Rcam.inverse();
    Rcam = inverted;
  }

  // Reinterpret the host-side pose as the device-layout types the kernel expects.
  device::Mat33& rotation_dev = device_cast<device::Mat33> (Rcam);
  float3& translation_dev = device_cast<float3>(tcam);

  // Transform each pyramid level independently.
  for (int level = 0; level < LEVELS; ++level)
    device::tranformMaps(vmaps_[level], nmaps_[level], rotation_dev, translation_dev, vmapDst[level], nmapDst[level], inverse);
}
void Mesh::applyTransformation(std::vector<Matrix3frm>& rmat, std::vector<Vector3f>& tvec, int globalTime) { pcl::PointCloud<pcl::PointXYZ> pointCloud; pcl::fromROSMsg(mesh_.cloud, pointCloud); Matrix3frm rmatPrev = rmat[globalTime - 1]; Vector3f tvecPrev = tvec[globalTime - 1]; Matrix3frm rmatCurr = rmat[globalTime]; Vector3f tvecCurr = tvec[globalTime]; Matrix3frm rmatPrevInverse = rmatPrev.inverse(); Matrix3frm rmatCurrInverse = rmatCurr.inverse(); float x, y, z; for(int point = 0; point < pointCloud.points.size(); point++) { x = pointCloud.points[point].x * rmatCurr(0, 0) + pointCloud.points[point].y * rmatCurr(0, 1) + pointCloud.points[point].z * rmatCurr(0, 2) + tvecCurr(0); y = pointCloud.points[point].x * rmatCurr(1, 0) + pointCloud.points[point].y * rmatCurr(1, 1) + pointCloud.points[point].z * rmatCurr(1, 2) + tvecCurr(1); z = pointCloud.points[point].x * rmatCurr(2, 0) + pointCloud.points[point].y * rmatCurr(2, 1) + pointCloud.points[point].z * rmatCurr(2, 2) + tvecCurr(2); x -= tvecPrev(0); y -= tvecPrev(1); z -= tvecPrev(2); pointCloud.points[point].x = x * rmatPrevInverse(0, 0) + y * rmatPrevInverse(0, 1) + z * rmatPrevInverse(0, 2); pointCloud.points[point].y = x * rmatPrevInverse(1, 0) + y * rmatPrevInverse(1, 1) + z * rmatPrevInverse(1, 2); pointCloud.points[point].z = x * rmatPrevInverse(2, 0) + y * rmatPrevInverse(2, 1) + z * rmatPrevInverse(2, 2); } pcl::toROSMsg(pointCloud, mesh_.cloud); }
///////////////////////////////////////////////////////////////////////////////
// Processes one raw depth frame: denoise, build pyramids, estimate the new
// camera pose with coarse-to-fine ICP, fuse the frame into the TSDF volume,
// and raycast the model maps used as the reference for the next frame.
// Returns false on the very first frame (pose cannot yet be estimated) and
// when ICP degenerates (the tracker is then reset); true otherwise.
//   depth_raw - input depth map on the GPU.
//   hint      - optional external pose estimate used to seed ICP.
bool pcl::gpu::KinfuTracker::operator() (const DepthMap& depth_raw, Eigen::Affine3f *hint)
{
  // Camera intrinsics wrapper; intr(i) yields intrinsics scaled to pyramid level i.
  device::Intr intr (fx_, fy_, cx_, cy_);

  if (!disable_icp_)
  {
    {
      //ScopeTime time(">>> Bilateral, pyr-down-all, create-maps-all");
      //depth_raw.copyTo(depths_curr[0]);
      // Smooth the raw depth (edge-preserving), then build the depth pyramid
      // and per-level vertex/normal maps for the current frame.
      device::bilateralFilter (depth_raw, depths_curr_[0]);

      // Optionally discard depth beyond the configured ICP working range.
      if (max_icp_distance_ > 0)
        device::truncateDepth(depths_curr_[0], max_icp_distance_);

      for (int i = 1; i < LEVELS; ++i)
        device::pyrDown (depths_curr_[i-1], depths_curr_[i]);

      for (int i = 0; i < LEVELS; ++i)
      {
        device::createVMap (intr(i), depths_curr_[i], vmaps_curr_[i]);
        //device::createNMap(vmaps_curr_[i], nmaps_curr_[i]);
        computeNormalsEigen (vmaps_curr_[i], nmaps_curr_[i]);
      }
      pcl::device::sync ();
    }

    //can't perform more on first frame
    if (global_time_ == 0)
    {
      Matrix3frm init_Rcam = rmats_[0]; // [Ri|ti] - pos of camera, i.e.
      Vector3f init_tcam = tvecs_[0]; // transform from camera to global coo space for (i-1)th camera pose

      // device_cast reinterprets the host Eigen storage as device-layout types;
      // the referenced host variables must outlive these references.
      Mat33& device_Rcam = device_cast<Mat33> (init_Rcam);
      float3& device_tcam = device_cast<float3>(init_tcam);

      Matrix3frm init_Rcam_inv = init_Rcam.inverse ();
      Mat33& device_Rcam_inv = device_cast<Mat33> (init_Rcam_inv);
      float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize());

      //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tranc_dist, volume_);
      // Fuse the first frame into the TSDF volume at the initial pose.
      device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_);

      // Bring the current maps into global coordinates; they become the
      // "previous" model maps for the next frame's ICP.
      for (int i = 0; i < LEVELS; ++i)
        device::tranformMaps (vmaps_curr_[i], nmaps_curr_[i], device_Rcam, device_tcam, vmaps_g_prev_[i], nmaps_g_prev_[i]);

      ++global_time_;
      return (false);
    }

    ///////////////////////////////////////////////////////////////////////////////////////////
    // Iterative Closest Point
    Matrix3frm Rprev = rmats_[global_time_ - 1]; // [Ri|ti] - pos of camera, i.e.
    Vector3f tprev = tvecs_[global_time_ - 1]; // tranfrom from camera to global coo space for (i-1)th camera pose
    Matrix3frm Rprev_inv = Rprev.inverse (); //Rprev.t();

    //Mat33& device_Rprev = device_cast<Mat33> (Rprev);
    Mat33& device_Rprev_inv = device_cast<Mat33> (Rprev_inv);
    float3& device_tprev = device_cast<float3> (tprev);

    // Seed the pose from the hint when provided, otherwise from the previous frame.
    Matrix3frm Rcurr;
    Vector3f tcurr;
    if(hint)
    {
      Rcurr = hint->rotation().matrix();
      tcurr = hint->translation().matrix();
    }
    else
    {
      Rcurr = Rprev; // transform to global coo for ith camera pose
      tcurr = tprev;
    }

    {
      //ScopeTime time("icp-all");
      // Coarse-to-fine: start at the smallest pyramid level.
      for (int level_index = LEVELS-1; level_index>=0; --level_index)
      {
        int iter_num = icp_iterations_[level_index];

        MapArr& vmap_curr = vmaps_curr_[level_index];
        MapArr& nmap_curr = nmaps_curr_[level_index];
        //MapArr& vmap_g_curr = vmaps_g_curr_[level_index];
        //MapArr& nmap_g_curr = nmaps_g_curr_[level_index];
        MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
        MapArr& nmap_g_prev = nmaps_g_prev_[level_index];
        //CorespMap& coresp = coresps_[level_index];

        for (int iter = 0; iter < iter_num; ++iter)
        {
          Mat33& device_Rcurr = device_cast<Mat33> (Rcurr);
          float3& device_tcurr = device_cast<float3>(tcurr);

          // 6x6 normal-equation system of the linearized point-to-plane ICP.
          Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
          Eigen::Matrix<double, 6, 1> b;
#if 0
          device::tranformMaps(vmap_curr, nmap_curr, device_Rcurr, device_tcurr, vmap_g_curr, nmap_g_curr);
          findCoresp(vmap_g_curr, nmap_g_curr, device_Rprev_inv, device_tprev, intr(level_index), vmap_g_prev, nmap_g_prev, distThres_, angleThres_, coresp);
          device::estimateTransform(vmap_g_prev, nmap_g_prev, vmap_g_curr, coresp, gbuf_, sumbuf_, A.data(), b.data());

          //cv::gpu::GpuMat ma(coresp.rows(), coresp.cols(), CV_32S, coresp.ptr(), coresp.step());
          //cv::Mat cpu;
          //ma.download(cpu);
          //cv::imshow(names[level_index] + string(" --- coresp white == -1"), cpu == -1);
#else
          // Builds A and b on the GPU from projective correspondences.
          estimateCombined (device_Rcurr, device_tcurr, vmap_curr, nmap_curr, device_Rprev_inv, device_tprev, intr (level_index),
                            vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());
#endif
          //checking nullspace
          double det = A.determinant ();

          // Degenerate or non-finite system: tracking failed; reset the tracker.
          if (fabs (det) < 1e-15 || pcl_isnan (det))
          {
            if (pcl_isnan (det)) cout << "qnan" << endl;

            reset ();
            return (false);
          }
          //float maxc = A.maxCoeff();

          Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
          //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

          // Small-angle rotation parameters and translation increment.
          float alpha = result (0);
          float beta = result (1);
          float gamma = result (2);

          Eigen::Matrix3f Rinc = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
          Vector3f tinc = result.tail<3> ();

          //compose
          tcurr = Rinc * tcurr + tinc;
          Rcurr = Rinc * Rcurr;
        }
      }
    }
    //save transform
    rmats_.push_back (Rcurr);
    tvecs_.push_back (tcurr);
  }
  else /* if (disable_icp_) */
  {
    // ICP disabled: carry the previous pose forward unchanged.
    if (global_time_ == 0)
      ++global_time_;

    Matrix3frm Rcurr = rmats_[global_time_ - 1];
    Vector3f tcurr = tvecs_[global_time_ - 1];

    rmats_.push_back (Rcurr);
    tvecs_.push_back (tcurr);
  }

  Matrix3frm Rprev = rmats_[global_time_ - 1];
  Vector3f tprev = tvecs_[global_time_ - 1];

  Matrix3frm Rcurr = rmats_.back();
  Vector3f tcurr = tvecs_.back();

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Integration check - We do not integrate volume if camera does not move.
  // rnorm is the rotation-angle magnitude between consecutive poses (rodrigues2
  // converts the relative rotation to an axis-angle vector).
  float rnorm = rodrigues2(Rcurr.inverse() * Rprev).norm();
  float tnorm = (tcurr - tprev).norm();
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_;
  if (disable_icp_)
    integrate = true;

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Volume integration
  float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());

  Matrix3frm Rcurr_inv = Rcurr.inverse ();
  Mat33& device_Rcurr_inv = device_cast<Mat33> (Rcurr_inv);
  float3& device_tcurr = device_cast<float3> (tcurr);
  if (integrate)
  {
    //ScopeTime time("tsdf");
    //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tranc_dist, volume_);
    integrateTsdfVolume (depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_);
  }

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Ray casting
  // Render model vertex/normal maps from the volume at the new pose; coarser
  // levels are produced by downsampling the raycast result.
  Mat33& device_Rcurr = device_cast<Mat33> (Rcurr);
  {
    //ScopeTime time("ray-cast-all");
    raycast (intr, device_Rcurr, device_tcurr, tsdf_volume_->getTsdfTruncDist(), device_volume_size, tsdf_volume_->data(), vmaps_g_prev_[0], nmaps_g_prev_[0]);
    for (int i = 1; i < LEVELS; ++i)
    {
      resizeVMap (vmaps_g_prev_[i-1], vmaps_g_prev_[i]);
      resizeNMap (nmaps_g_prev_[i-1], nmaps_g_prev_[i]);
    }
    pcl::device::sync ();
  }

  ++global_time_;
  return (true);
}
bool MyPointCloud::alignPointClouds(std::vector<Matrix3frm>& Rcam, std::vector<Vector3f>& tcam, MyPointCloud *globalPreviousPointCloud, device::Intr& intrinsics, int globalTime) { Matrix3frm Rprev = Rcam[globalTime - 1]; // [Ri|ti] - pos of camera, i.e. Vector3f tprev = tcam[globalTime - 1]; // tranfrom from camera to global coo space for (i-1)th camera pose Matrix3frm Rprev_inv = Rprev.inverse(); //Rprev.t(); device::Mat33& device_Rprev_inv = device_cast<device::Mat33> (Rprev_inv); float3& device_tprev = device_cast<float3> (tprev); Matrix3frm Rcurr = Rprev; // tranform to global coo for ith camera pose Vector3f tcurr = tprev; #pragma omp parallel for for(int level = LEVELS - 1; level >= 0; --level) { int iterations = icpIterations_[level]; #pragma omp parallel for for(int iteration = 0; iteration < iterations; ++iteration) { { printf(" ICP level %d iteration %d", level, iteration); pcl::ScopeTime time(""); device::Mat33& device_Rcurr = device_cast<device::Mat33> (Rcurr); float3& device_tcurr = device_cast<float3>(tcurr); Eigen::Matrix<float, 6, 6, Eigen::RowMajor> A; Eigen::Matrix<float, 6, 1> b; if(level == 2 && iteration == 0) error_.create(rows_ * 4, cols_); device::estimateCombined (device_Rcurr, device_tcurr, vmaps_[level], nmaps_[level], device_Rprev_inv, device_tprev, intrinsics (level), globalPreviousPointCloud->getVertexMaps()[level], globalPreviousPointCloud->getNormalMaps()[level], distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data (), error_); //checking nullspace float det = A.determinant (); if (fabs (det) < 1e-15 || !pcl::device::valid_host (det)) { // printf("ICP failed at level %d, iteration %d (global time %d)\n", level, iteration, globalTime); return (false); } //else printf("ICP succeed at level %d, iteration %d (global time %d)\n", level, iteration, globalTime); Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b); //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b); float alpha = result (0); 
float beta = result (1); float gamma = result (2); Eigen::Matrix3f Rinc = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ()); Vector3f tinc = result.tail<3> (); //compose tcurr = Rinc * tcurr + tinc; Rcurr = Rinc * Rcurr; } } } //save tranform Rcam[globalTime] = Rcurr; tcam[globalTime] = tcurr; return (true); }
///////////////////////////////////////////////////////////////////////////////
// Estimates the current camera pose by registering the current frame's
// vertex/normal pyramids against the raycast model maps with coarse-to-fine
// point-to-plane ICP. Poses are shifted into the cyclical buffer's local frame
// via origin_metric before being handed to the device kernels.
// On success writes the refined pose into current_global_rotation/translation,
// clears lost_, and returns true. On a degenerate/non-finite system it sets
// lost_ and returns false (the volume is NOT reset here; the GUI reports it).
inline bool pcl::gpu::kinfuLS::KinfuTracker::performICP(const Intr& cam_intrinsics, Matrix3frm& previous_global_rotation, Vector3f& previous_global_translation, Matrix3frm& current_global_rotation, Vector3f& current_global_translation)
{
  // ICP disabled: report success without touching the pose.
  if(disable_icp_)
  {
    lost_=false;
    return (true);
  }

  // Compute inverse rotation
  Matrix3frm previous_global_rotation_inv = previous_global_rotation.inverse (); // Rprev.t();

  ///////////////////////////////////////////////
  // Convert pose to device type
  Mat33 device_cam_rot_local_prev_inv;
  float3 device_cam_trans_local_prev;
  convertTransforms(previous_global_rotation_inv, previous_global_translation, device_cam_rot_local_prev_inv, device_cam_trans_local_prev);
  // Shift translation into the cyclical buffer's local coordinates.
  device_cam_trans_local_prev -= getCyclicalBufferStructure ()->origin_metric; ;

  // Use temporary pose, so that we modify the current global pose only if ICP converged
  // NOTE(review): these temporaries are declared but the code below updates the
  // output parameters directly — presumably leftover from an older design.
  Matrix3frm resulting_rotation;
  Vector3f resulting_translation;

  // Initialize output pose to current pose
  current_global_rotation = previous_global_rotation;
  current_global_translation = previous_global_translation;

  ///////////////////////////////////////////////
  // Run ICP
  {
    //ScopeTime time("icp-all");
    // Coarse-to-fine: start at the smallest pyramid level.
    for (int level_index = LEVELS-1; level_index>=0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];

      // current vertex and normal maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];

      // previous vertex and normal maps
      MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
      MapArr& nmap_g_prev = nmaps_g_prev_[level_index];

      // We need to transform the maps from global to local coordinates
      Mat33& rotation_id = device_cast<Mat33> (rmats_[0]); // Identity Rotation Matrix. Because we only need translation
      float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
      cube_origin = -cube_origin;

      // In-place shift of the model maps by -origin_metric (source and
      // destination alias on purpose).
      MapArr& vmap_temp = vmap_g_prev;
      MapArr& nmap_temp = nmap_g_prev;
      transformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmap_g_prev, nmap_g_prev);

      // run ICP for iter_num iterations (return false when lost)
      for (int iter = 0; iter < iter_num; ++iter)
      {
        //CONVERT POSES TO DEVICE TYPES
        // CURRENT LOCAL POSE
        Mat33 device_current_rotation = device_cast<Mat33> (current_global_rotation); // We do not deal with changes in rotations
        float3 device_current_translation_local = device_cast<float3> (current_global_translation);
        device_current_translation_local -= getCyclicalBufferStructure ()->origin_metric;

        // 6x6 normal-equation system of the linearized point-to-plane ICP.
        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;

        // call the ICP function (see paper by Kok-Lim Low "Linear Least-squares Optimization for Point-to-Plane ICP Surface Registration")
        estimateCombined (device_current_rotation, device_current_translation_local, vmap_curr, nmap_curr, device_cam_rot_local_prev_inv, device_cam_trans_local_prev, cam_intrinsics (level_index), vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());

        // checking nullspace
        double det = A.determinant ();

        if ( fabs (det) < 1e-15 /*100000 */ || pcl_isnan (det) ) //TODO find a threshold that makes ICP track well, but prevents it from generating wrong transforms
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;

          if(lost_ == false)
            PCL_ERROR ("ICP LOST... PLEASE COME BACK TO THE LAST VALID POSE (green)\n");

          //reset (); //GUI will now show the user that ICP is lost. User needs to press "R" to reset the volume
          lost_ = true;
          return (false);
        }

        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();

        // Small-angle rotation parameters from the linearized solution.
        float alpha = result (0);
        float beta = result (1);
        float gamma = result (2);

        // deduce incremental rotation and translation from ICP's results
        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose global pose
        current_global_translation = cam_rot_incremental * current_global_translation + cam_trans_incremental;
        current_global_rotation = cam_rot_incremental * current_global_rotation;
      }
    }
  }
  // ICP has converged
  lost_ = false;
  return (true);
}