template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::getTransformationFromCorrelation (
    const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
    const Eigen::Matrix<Scalar, 4, 1> &centroid_src,
    const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
    const Eigen::Matrix<Scalar, 4, 1> &centroid_tgt,
    Matrix4 &transformation_matrix) const
{
  transformation_matrix.setIdentity ();

  // Assemble the correlation matrix H = source * target'
  Eigen::Matrix<Scalar, 3, 3> H = (cloud_src_demean * cloud_tgt_demean.transpose ()).topLeftCorner (3, 3);

  // Compute the Singular Value Decomposition
  Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3> > svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU ();
  Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV ();

  // Compute R = V * U'; if V*U' would be a reflection (negative determinant),
  // flip the sign of the last column of V to get a proper rotation
  if (u.determinant () * v.determinant () < 0)
  {
    for (int x = 0; x < 3; ++x)
      v (x, 2) *= -1;
  }

  Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose ();

  // Return the correct transformation
  transformation_matrix.topLeftCorner (3, 3) = R;
  const Eigen::Matrix<Scalar, 3, 1> Rc (R * centroid_src.head (3));
  transformation_matrix.block (0, 3, 3, 1) = centroid_tgt.head (3) - Rc;
}
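For orientation, the same SVD (Kabsch) alignment can be written standalone with plain Eigen; the following is a minimal sketch under that assumption (the function name alignRigid is hypothetical, not part of PCL):

#include <Eigen/Dense>

// Given demeaned 3xN source/target point sets and the original centroids,
// recover R and t such that tgt ~= R * src + t.
void alignRigid (const Eigen::Matrix3Xd &src_demean,
                 const Eigen::Matrix3Xd &tgt_demean,
                 const Eigen::Vector3d &centroid_src,
                 const Eigen::Vector3d &centroid_tgt,
                 Eigen::Matrix3d &R, Eigen::Vector3d &t)
{
  Eigen::Matrix3d H = src_demean * tgt_demean.transpose ();
  Eigen::JacobiSVD<Eigen::Matrix3d> svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d u = svd.matrixU ();
  Eigen::Matrix3d v = svd.matrixV ();
  if (u.determinant () * v.determinant () < 0)  // reflection: flip last column of V
    v.col (2) *= -1;
  R = v * u.transpose ();
  t = centroid_tgt - R * centroid_src;
}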
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::TransformationEstimationSVDScale<PointSource, PointTarget, Scalar>::getTransformationFromCorrelation (
    const Eigen::MatrixXf &cloud_src_demean,
    const Eigen::Vector4f &centroid_src,
    const Eigen::MatrixXf &cloud_tgt_demean,
    const Eigen::Vector4f &centroid_tgt,
    Matrix4 &transformation_matrix) const
{
  transformation_matrix.setIdentity ();

  // Assemble the correlation matrix H = source * target'
  Eigen::Matrix<Scalar, 3, 3> H = (cloud_src_demean.cast<Scalar> () *
                                   cloud_tgt_demean.cast<Scalar> ().transpose ()).topLeftCorner (3, 3);

  // Compute the Singular Value Decomposition
  Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3> > svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU ();
  Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV ();

  // Compute R = V * U'; flip the last column of V if V*U' would be a reflection
  if (u.determinant () * v.determinant () < 0)
  {
    for (int x = 0; x < 3; ++x)
      v (x, 2) *= -1;
  }

  Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose ();

  // rotated cloud
  Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> src_ = R * cloud_src_demean.cast<Scalar> ();

  float scale1, scale2;
  double sum_ss = 0.0, sum_tt = 0.0, sum_tt_ = 0.0;

  for (Eigen::Index corrIdx = 0; corrIdx < cloud_src_demean.cols (); ++corrIdx)
  {
    sum_ss += cloud_src_demean (0, corrIdx) * cloud_src_demean (0, corrIdx);
    sum_ss += cloud_src_demean (1, corrIdx) * cloud_src_demean (1, corrIdx);
    sum_ss += cloud_src_demean (2, corrIdx) * cloud_src_demean (2, corrIdx);

    sum_tt += cloud_tgt_demean (0, corrIdx) * cloud_tgt_demean (0, corrIdx);
    sum_tt += cloud_tgt_demean (1, corrIdx) * cloud_tgt_demean (1, corrIdx);
    sum_tt += cloud_tgt_demean (2, corrIdx) * cloud_tgt_demean (2, corrIdx);

    sum_tt_ += cloud_tgt_demean (0, corrIdx) * src_ (0, corrIdx);
    sum_tt_ += cloud_tgt_demean (1, corrIdx) * src_ (1, corrIdx);
    sum_tt_ += cloud_tgt_demean (2, corrIdx) * src_ (2, corrIdx);
  }

  // two scale estimates: scale1 matches the variances of the two clouds,
  // scale2 is the least-squares estimate; only scale2 is used below
  scale1 = sqrt (sum_tt / sum_ss);
  scale2 = sum_tt_ / sum_ss;
  float scale = scale2;

  transformation_matrix.topLeftCorner (3, 3) = scale * R;
  const Eigen::Matrix<Scalar, 3, 1> Rc (R * centroid_src.cast<Scalar> ().head (3));
  transformation_matrix.block (0, 3, 3, 1) = centroid_tgt.cast<Scalar> ().head (3) - Rc;
}
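For reference, the two scale estimates computed above correspond to the following, with s_i and t_i the demeaned source and target points and R the estimated rotation; the second (least-squares) estimate is the one actually applied:

\[
\text{scale}_1 = \sqrt{\frac{\sum_i \lVert t_i \rVert^2}{\sum_i \lVert s_i \rVert^2}},
\qquad
\text{scale}_2 = \frac{\sum_i t_i^{\top} R\, s_i}{\sum_i \lVert s_i \rVert^2}
\]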
tetra::tetra(point *a, point *b, point *c, point *d)
{
    p[0] = a;
    p[1] = b;
    p[2] = c;
    p[3] = d;

    ngb.fill(nullptr);
    nid.fill(-1);

    f = { { { p[3], p[2], p[1] },    // 0 - 321
            { p[0], p[2], p[3] },    // 1 - 023
            { p[0], p[3], p[1] },    // 2 - 031
            { p[0], p[1], p[2] } } };// 3 - 012

#ifndef NDEBUG
    if (a) {
        Eigen::Matrix<double, 4, 4> ort;
        ort << 1, a->x, a->y, a->z,
               1, b->x, b->y, b->z,
               1, c->x, c->y, c->z,
               1, d->x, d->y, d->z;
        assert(ort.determinant() > 0); // assert positive orientation
    }
#endif
}
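The assertion checks the standard 3D orientation predicate: the tetrahedron (a, b, c, d) is positively oriented exactly when

\[
\det\begin{pmatrix}
1 & a_x & a_y & a_z \\
1 & b_x & b_y & b_z \\
1 & c_x & c_y & c_z \\
1 & d_x & d_y & d_z
\end{pmatrix} > 0
\]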
void ConstraintManager::computeConstraintForces()
{
    // assemble C, J and dJ/dt from the individual constraints
    for (std::vector< FixedDistanceConstraint* >::iterator iter = m_constraintForces.begin();
         iter != m_constraintForces.end(); ++iter)
    {
        (*iter)->populateConstraintMatrix( m_constraint );
        (*iter)->populateJacobian( m_jacobian );
        (*iter)->populateJacobianDerivative( m_jacobianDeriv );
    }

    Eigen::Matrix< double, Eigen::Dynamic, Eigen::Dynamic > invMassMatrix;
    Eigen::Matrix< double, Eigen::Dynamic, 1 > velocityCurrent;
    Eigen::Matrix< double, Eigen::Dynamic, 1 > forceCurrent;

    m_particleManager.getInvMassMatrix( invMassMatrix );
    m_particleManager.getCurrentState( velocityCurrent, false, true );
    m_particleManager.getCurrentDerivative( forceCurrent, false, true );

    m_constraintDeriv = m_jacobian * velocityCurrent;

    Eigen::Matrix< double, Eigen::Dynamic, Eigen::Dynamic > jacobianInvJacobianT =
        m_jacobian * invMassMatrix * m_jacobian.transpose();
    Eigen::Matrix< double, Eigen::Dynamic, 1 > jacobianDerivVelocity = m_jacobianDeriv * velocityCurrent;
    Eigen::Matrix< double, Eigen::Dynamic, 1 > jacobianInvMassForce = m_jacobian * invMassMatrix * forceCurrent;

    // right-hand side, including the feedback (stabilization) terms ks*C and kd*Cdot
    Eigen::Matrix< double, Eigen::Dynamic, 1 > b =
        -jacobianDerivVelocity - jacobianInvMassForce - m_ks*m_constraint - m_kd*m_constraintDeriv;

    // NOTE: an exact floating-point comparison against zero almost never triggers;
    // a tolerance test on |det| would be more robust here
    if (jacobianInvJacobianT.determinant() != 0)
    {
        m_lagrangeMultipliers = jacobianInvJacobianT.ldlt().solve( b );
    }
    else
    {
        m_lagrangeMultipliers.resize( m_constraintForces.size() );
        m_lagrangeMultipliers.setZero();
    }
}
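The solve above is the standard Lagrange-multiplier formulation for constrained particle dynamics, with Baumgarte-style stabilization terms k_s C + k_d Cdot folded into the right-hand side:

\[
J M^{-1} J^{\top} \lambda \;=\; -\dot{J}\,\dot{q} \;-\; J M^{-1} F \;-\; k_s C \;-\; k_d \dot{C}
\]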
//-----------------------------------------------------------------------------------------------
// Manipulability
//-----------------------------------------------------------------------------------------------
void LWR4Kinematics::getManipulabilityIdx(const KDL::Jacobian jac, unsigned int dof_param, double & manp_idx) {

    Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> jj;
    Eigen::Matrix<double, Eigen::Dynamic, 7> jac_dof;

    switch (dof_param) {
    case 1: // Translation only: top 3 rows of the Jacobian
        jac_dof = jac.data.block<3,7>(0,0);
        jj = (jac_dof * jac_dof.transpose());
        break;
    case 2: // Rotation only: bottom 3 rows of the Jacobian
        jac_dof = jac.data.block<3,7>(3,0);
        jj = (jac_dof * jac_dof.transpose());
        break;
    default: // All 6 DOF
        jj = (jac.data * jac.data.transpose());
        break;
    }

    // manipulability index: sqrt(det(J * J^T))
    manp_idx = std::sqrt(jj.determinant());
}
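The quantity computed here is Yoshikawa's manipulability measure,

\[
w(q) = \sqrt{\det\!\left(J(q)\,J(q)^{\top}\right)}
\]

which tends to zero as the arm approaches a singular configuration.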
unsigned int exact_inclusion(const tetra *t, const point *p)
{
    Eigen::Matrix<double, 4, 4> A;
    std::array<int, 4> ort;

    for (int i = 0; i < 4; ++i) {
        point &a = *t->f[i][0];
        point &b = *t->f[i][1];
        point &c = *t->f[i][2];

        A << 1, a.x, a.y, a.z,
             1, b.x, b.y, b.z,
             1, c.x, c.y, c.z,
             1, p->x, p->y, p->z;

        auto tmp = A.determinant();

        if (tmp < 0)        // p on the negative side of face i: outside
            return 0;
        else if (tmp == 0)  // p on the supporting plane of face i
            ort[i] = 0;
        else                // p strictly on the positive side of face i
            ort[i] = 1;
    }

    // encode the four orientations as a bitmask (15 == strictly inside)
    return ort[0] + 2 * ort[1] + 4 * ort[2] + 8 * ort[3];
}
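A hedged reading of the return code (my interpretation of the code above, not stated in the source; note that despite the name, the test uses floating-point determinants and is exact only up to rounding): bit i is set when p lies strictly on the positive side of face i, a cleared bit marks a face plane that contains p, and 0 means p is outside.

// hypothetical usage sketch of exact_inclusion (tetra/point as defined above)
void classify(const tetra *t, const point *p) {
    unsigned int code = exact_inclusion(t, p);
    if (code == 15) {
        // strictly inside the tetrahedron
    } else if (code != 0) {
        // on the boundary: cleared bits identify the incident face planes
    } else {
        // outside: some face saw p on its negative side
    }
}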
// Task: calculates the third deviatoric invariant. Note that this routine requires a
// Kelvin-mapped DEVIATORIC vector.
// A deviatoric mapping was not done here in order to avoid unnecessary calculations.
double CalJ3(const KVec& dev_vec)
{
    Eigen::Matrix<double, 3, 3> tens = KelvinVectorToTensor(dev_vec);
    // TN: Not strictly necessary. Can be written explicitly for vector coordinates
    return tens.determinant();
}
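For a deviatoric (trace-free) tensor s, the value returned is the third deviatoric invariant,

\[
J_3 = \det(\mathbf{s}) = \tfrac{1}{3}\,\operatorname{tr}(\mathbf{s}^3)
\]

where the second equality follows from Newton's identities with tr(s) = 0.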
bool CameraPoseFinderICP::minimizePointToPlaneErrFunc(unsigned level, Eigen::Matrix<float, 6, 1> &six_dof,
                                                      const Mat44& cur_transform, const Mat44& last_transform_inv)
{
    cudaCalPointToPlaneErrSolverParams(level, cur_transform, last_transform_inv,
                                       _camera_params_pyramid[level],
                                       AppParams::instance()->_icp_params.fDistThres,
                                       AppParams::instance()->_icp_params.fNormSinThres);

    CudaMap1D<float> buf = (CudaDeviceDataMan::instance()->rigid_align_buf_reduced).clone(CPU);

    // unpack the reduced buffer: the upper triangle of the symmetric 6x6 matrix ATA
    // followed by the right-hand side ATb, row by row
    int shift = 0;
    Eigen::Matrix<float, 6, 6, Eigen::RowMajor> ATA;
    Eigen::Matrix<float, 6, 1> ATb;
    for (int i = 0; i < 6; ++i)      // rows
    {
        for (int j = i; j < 7; ++j)  // cols + b
        {
            float value = buf.at(shift++);
            if (j == 6)
            {
                ATb(i) = value;
            }
            else
            {
                ATA(i, j) = value;
                ATA(j, i) = value;
            }
        }
    }
    //cout<<ATb<<endl;

    // a (near-)singular system means the pose increment is unconstrained; give up
    if (ATA.determinant() < 1E-10)
    {
        return false;
    }
    six_dof = ATA.llt().solve(ATb);
    return true;
}
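The 27 reduced values decode into the normal equations of the linearized point-to-plane objective: 21 entries for the upper triangle of the symmetric matrix and 6 for the right-hand side, solved for the small-angle twist

\[
A^{\top}A\,x = A^{\top}b, \qquad x = (\alpha,\ \beta,\ \gamma,\ t_x,\ t_y,\ t_z)^{\top}
\]

The next snippet unpacks the same buffer layout in double precision.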
void CCubicGrids::extractRTFromBuffer(const cuda::GpuMat& cvgmSumBuf_, Eigen::Matrix3f* peimRw_, Eigen::Vector3f* peivTw_) const {
    Mat cvmSumBuf;
    cvgmSumBuf_.download(cvmSumBuf);
    double* aHostTmp = (double*)cvmSumBuf.data;

    //declare A and b
    Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
    Eigen::Matrix<double, 6, 1> b;

    //retrieve A and b from cvmSumBuf: upper triangle of the symmetric A, then b, row by row
    short sShift = 0;
    for (int i = 0; i < 6; ++i) {   // rows
        for (int j = i; j < 7; ++j) { // cols + b
            double value = aHostTmp[sShift++];
            if (j == 6) // vector b
                b.data()[i] = value;
            else
                A.data()[j * 6 + i] = A.data()[i * 6 + j] = value;
        }//for each col
    }//for each row

    //checking nullspace; dDet != dDet is true only for NaN
    double dDet = A.determinant();
    if (fabs(dDet) < 1e-15 || dDet != dDet) {
        if (dDet != dDet) PRINTSTR("Failure -- dDet cannot be qnan. ");
        //reset ();
        return;
    }//if dDet is rational

    //float maxc = A.maxCoeff();
    Eigen::Matrix<float, 6, 1> result = A.llt().solve(b).cast<float>();
    //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

    float alpha = result(0);
    float beta  = result(1);
    float gamma = result(2);

    Eigen::Matrix3f Rinc = (Eigen::Matrix3f)Eigen::AngleAxisf(gamma, Eigen::Vector3f::UnitZ()) *
                                            Eigen::AngleAxisf(beta,  Eigen::Vector3f::UnitY()) *
                                            Eigen::AngleAxisf(alpha, Eigen::Vector3f::UnitX());
    Eigen::Vector3f tinc = result.tail<3>();

    //compose: the increment is applied to the inverse (world-to-camera) pose,
    //which is then inverted back into the stored camera-to-world pose
    //eivTwCur = Rinc * eivTwCur + tinc;
    //eimrmRwCur = Rinc * eimrmRwCur;
    Eigen::Vector3f eivTinv = -peimRw_->transpose() * (*peivTw_);
    Eigen::Matrix3f eimRinv = peimRw_->transpose();
    eivTinv = Rinc * eivTinv + tinc;
    eimRinv = Rinc * eimRinv;
    *peivTw_ = -eimRinv.transpose() * eivTinv;
    *peimRw_ = eimRinv.transpose();
}
/**
 * @brief findIntersection for 2D lines. Can handle parallel lines (returns nothing).
 * @param other
 * @return the intersection point, or an empty optional for (near-)parallel lines
 */
optional<const C2dImagePointPx> C2dLine::findIntersection(const C2dLine & other) const
{
    // columns are the two direction vectors; solve directions * (lambda, mu)^T = diff
    Eigen::Matrix<double, 2, 2> directions;
    directions.block<2,1>(0,0) = direction;
    directions.block<2,1>(0,1) = -other.direction;

    // |det| is about 1 for perpendicular unit directions, near 0 for parallel ones
    if (fabs(directions.determinant()) < 1e-8)
        return optional<const C2dImagePointPx>();

    const C2dImagePointPx diff = other.pointOnLine - pointOnLine;
    const TEigen2dPoint lambda_mu = directions.inverse() * diff;

    const C2dImagePointPx pointOnLine = point(lambda_mu(0));

    if (IS_DEBUG)
        CHECK(!zero((other.point(lambda_mu(1)) - pointOnLine).squaredNorm()),
              "findIntersection failed (parallel lines?)");

    return optional<const C2dImagePointPx>(pointOnLine);
}
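The 2x2 solve comes from equating the two parametric lines:

\[
p_1 + \lambda\, d_1 = p_2 + \mu\, d_2
\;\Longrightarrow\;
\begin{pmatrix} d_1 & -d_2 \end{pmatrix}
\begin{pmatrix} \lambda \\ \mu \end{pmatrix} = p_2 - p_1
\]

and the determinant of the direction matrix vanishing is precisely the parallel-lines case rejected above.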
template <int TDim>
void NuTo::ContinuumElementIGA<TDim>::CalculateNMatrixBMatrixDetJacobian(EvaluateDataContinuum<TDim>& rData, int rTheIP) const
{
    // calculate Jacobian
    const auto ipCoords = this->GetIntegrationType().GetLocalIntegrationPointCoordinates(rTheIP);

    const Eigen::MatrixXd& derivativeShapeFunctionsGeometryNatural =
        this->mInterpolationType->Get(Node::eDof::COORDINATES)
            .DerivativeShapeFunctionsNaturalIGA(ipCoords, mKnotIDs);

    Eigen::Matrix<double, TDim, TDim> jacobian =
        this->CalculateJacobian(derivativeShapeFunctionsGeometryNatural, rData.mNodalValues[Node::eDof::COORDINATES]);

    // There are two mappings in IGA: 1) reference element <=> parametric space (knots),
    // 2) parametric space <=> physical space.
    // The determinant picks up the constant Jacobian of mapping 1) per direction;
    // for the B-matrix only the inverse Jacobian of mapping 2) is needed.
    rData.mDetJacobian = jacobian.determinant();
    for (int i = 0; i < TDim; i++)
        rData.mDetJacobian *= 0.5 * (mKnots(i, 1) - mKnots(i, 0));

    if (rData.mDetJacobian == 0)
        throw Exception(std::string("[") + __PRETTY_FUNCTION__ +
                        "] Determinant of the Jacobian is zero, no inversion possible.");

    Eigen::Matrix<double, TDim, TDim> invJacobian = jacobian.inverse();

    // calculate shape functions and their derivatives
    for (auto dof : this->mInterpolationType->GetDofs())
    {
        if (dof == Node::eDof::COORDINATES)
            continue;
        const InterpolationBase& interpolationType = this->mInterpolationType->Get(dof);
        rData.mNIGA[dof] = interpolationType.MatrixNIGA(ipCoords, mKnotIDs);
        rData.mB[dof] = this->CalculateMatrixB(
            dof, interpolationType.DerivativeShapeFunctionsNaturalIGA(ipCoords, mKnotIDs), invJacobian);
    }
}
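The per-dimension factor 0.5 (k_{i,1} - k_{i,0}) is the Jacobian of the affine map from the reference element to the knot span in direction i:

\[
\xi \in [-1, 1] \;\mapsto\; x(\xi) = \tfrac{1}{2}\big((1-\xi)\,k_{i,0} + (1+\xi)\,k_{i,1}\big),
\qquad
\frac{dx}{d\xi} = \tfrac{1}{2}\,(k_{i,1} - k_{i,0})
\]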
bool
pcl::gpu::KinfuTracker::operator() (const DepthMap& depth_raw, Eigen::Affine3f *hint)
{
  device::Intr intr (fx_, fy_, cx_, cy_);

  if (!disable_icp_)
  {
    {
      //ScopeTime time(">>> Bilateral, pyr-down-all, create-maps-all");
      //depth_raw.copyTo(depths_curr[0]);
      device::bilateralFilter (depth_raw, depths_curr_[0]);

      if (max_icp_distance_ > 0)
        device::truncateDepth(depths_curr_[0], max_icp_distance_);

      for (int i = 1; i < LEVELS; ++i)
        device::pyrDown (depths_curr_[i-1], depths_curr_[i]);

      for (int i = 0; i < LEVELS; ++i)
      {
        device::createVMap (intr(i), depths_curr_[i], vmaps_curr_[i]);
        //device::createNMap(vmaps_curr_[i], nmaps_curr_[i]);
        computeNormalsEigen (vmaps_curr_[i], nmaps_curr_[i]);
      }
      pcl::device::sync ();
    }

    //can't perform more on first frame
    if (global_time_ == 0)
    {
      Matrix3frm init_Rcam = rmats_[0]; // [Ri|ti] - pos of camera, i.e.
      Vector3f   init_tcam = tvecs_[0]; // transform from camera to global coo space for (i-1)th camera pose

      Mat33&  device_Rcam = device_cast<Mat33> (init_Rcam);
      float3& device_tcam = device_cast<float3>(init_tcam);

      Matrix3frm init_Rcam_inv = init_Rcam.inverse ();
      Mat33&   device_Rcam_inv = device_cast<Mat33> (init_Rcam_inv);
      float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize());

      //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tranc_dist, volume_);
      device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam,
                                  tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_);

      for (int i = 0; i < LEVELS; ++i)
        device::tranformMaps (vmaps_curr_[i], nmaps_curr_[i], device_Rcam, device_tcam, vmaps_g_prev_[i], nmaps_g_prev_[i]);

      ++global_time_;
      return (false);
    }

    ///////////////////////////////////////////////////////////////////////////////////////////
    // Iterative Closest Point
    Matrix3frm Rprev = rmats_[global_time_ - 1]; // [Ri|ti] - pos of camera, i.e.
    Vector3f   tprev = tvecs_[global_time_ - 1]; // transform from camera to global coo space for (i-1)th camera pose
    Matrix3frm Rprev_inv = Rprev.inverse (); //Rprev.t();

    //Mat33& device_Rprev     = device_cast<Mat33> (Rprev);
    Mat33&  device_Rprev_inv = device_cast<Mat33> (Rprev_inv);
    float3& device_tprev     = device_cast<float3> (tprev);

    Matrix3frm Rcurr;
    Vector3f   tcurr;
    if (hint)
    {
      Rcurr = hint->rotation().matrix();
      tcurr = hint->translation().matrix();
    }
    else
    {
      Rcurr = Rprev; // transform to global coo for ith camera pose
      tcurr = tprev;
    }

    {
      //ScopeTime time("icp-all");
      for (int level_index = LEVELS-1; level_index >= 0; --level_index)
      {
        int iter_num = icp_iterations_[level_index];

        MapArr& vmap_curr = vmaps_curr_[level_index];
        MapArr& nmap_curr = nmaps_curr_[level_index];

        //MapArr& vmap_g_curr = vmaps_g_curr_[level_index];
        //MapArr& nmap_g_curr = nmaps_g_curr_[level_index];

        MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
        MapArr& nmap_g_prev = nmaps_g_prev_[level_index];

        //CorespMap& coresp = coresps_[level_index];

        for (int iter = 0; iter < iter_num; ++iter)
        {
          Mat33&  device_Rcurr = device_cast<Mat33> (Rcurr);
          float3& device_tcurr = device_cast<float3>(tcurr);

          Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
          Eigen::Matrix<double, 6, 1> b;
#if 0
          device::tranformMaps(vmap_curr, nmap_curr, device_Rcurr, device_tcurr, vmap_g_curr, nmap_g_curr);
          findCoresp(vmap_g_curr, nmap_g_curr, device_Rprev_inv, device_tprev, intr(level_index),
                     vmap_g_prev, nmap_g_prev, distThres_, angleThres_, coresp);
          device::estimateTransform(vmap_g_prev, nmap_g_prev, vmap_g_curr, coresp, gbuf_, sumbuf_, A.data(), b.data());

          //cv::gpu::GpuMat ma(coresp.rows(), coresp.cols(), CV_32S, coresp.ptr(), coresp.step());
          //cv::Mat cpu;
          //ma.download(cpu);
          //cv::imshow(names[level_index] + string(" --- coresp white == -1"), cpu == -1);
#else
          estimateCombined (device_Rcurr, device_tcurr, vmap_curr, nmap_curr, device_Rprev_inv, device_tprev,
                            intr (level_index), vmap_g_prev, nmap_g_prev, distThres_, angleThres_,
                            gbuf_, sumbuf_, A.data (), b.data ());
#endif
          //checking nullspace
          double det = A.determinant ();

          if (fabs (det) < 1e-15 || pcl_isnan (det))
          {
            if (pcl_isnan (det)) cout << "qnan" << endl;
            reset ();
            return (false);
          }
          //float maxc = A.maxCoeff();

          Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
          //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

          float alpha = result (0);
          float beta  = result (1);
          float gamma = result (2);

          Eigen::Matrix3f Rinc = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) *
                                                  AngleAxisf (beta,  Vector3f::UnitY ()) *
                                                  AngleAxisf (alpha, Vector3f::UnitX ());
          Vector3f tinc = result.tail<3> ();

          //compose
          tcurr = Rinc * tcurr + tinc;
          Rcurr = Rinc * Rcurr;
        }
      }
    }
    //save transform
    rmats_.push_back (Rcurr);
    tvecs_.push_back (tcurr);
  }
  else /* if (disable_icp_) */
  {
    if (global_time_ == 0)
      ++global_time_;

    Matrix3frm Rcurr = rmats_[global_time_ - 1];
    Vector3f   tcurr = tvecs_[global_time_ - 1];

    rmats_.push_back (Rcurr);
    tvecs_.push_back (tcurr);
  }

  Matrix3frm Rprev = rmats_[global_time_ - 1];
  Vector3f   tprev = tvecs_[global_time_ - 1];

  Matrix3frm Rcurr = rmats_.back();
  Vector3f   tcurr = tvecs_.back();

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Integration check - We do not integrate volume if camera does not move.
  float rnorm = rodrigues2(Rcurr.inverse() * Rprev).norm();
  float tnorm = (tcurr - tprev).norm();
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_;

  if (disable_icp_)
    integrate = true;

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Volume integration
  float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());

  Matrix3frm Rcurr_inv = Rcurr.inverse ();
  Mat33&  device_Rcurr_inv = device_cast<Mat33> (Rcurr_inv);
  float3& device_tcurr = device_cast<float3> (tcurr);
  if (integrate)
  {
    //ScopeTime time("tsdf");
    //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tranc_dist, volume_);
    integrateTsdfVolume (depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr,
                         tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_);
  }

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Ray casting
  Mat33& device_Rcurr = device_cast<Mat33> (Rcurr);
  {
    //ScopeTime time("ray-cast-all");
    raycast (intr, device_Rcurr, device_tcurr, tsdf_volume_->getTsdfTruncDist(), device_volume_size,
             tsdf_volume_->data(), vmaps_g_prev_[0], nmaps_g_prev_[0]);
    for (int i = 1; i < LEVELS; ++i)
    {
      resizeVMap (vmaps_g_prev_[i-1], vmaps_g_prev_[i]);
      resizeNMap (nmaps_g_prev_[i-1], nmaps_g_prev_[i]);
    }
    pcl::device::sync ();
  }

  ++global_time_;
  return (true);
}
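At each ICP iteration the solved 6-vector result = (alpha, beta, gamma, tx, ty, tz) is turned into an incremental transform that is left-composed onto the current pose estimate:

\[
R_{\mathrm{inc}} = R_z(\gamma)\,R_y(\beta)\,R_x(\alpha), \qquad
t \leftarrow R_{\mathrm{inc}}\,t + t_{\mathrm{inc}}, \qquad
R \leftarrow R_{\mathrm{inc}}\,R
\]

The same composition recurs in the KinFu variants below.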
template <typename T, int R, int C>
inline T determinant(const Eigen::Matrix<T,R,C>& m) {
  stan::math::validate_square(m, "determinant");
  return m.determinant();
}
bool
pcl::gpu::KinfuTracker::operator() (const DepthMap& depth_raw)
{
  device::Intr intr (fx_, fy_, cx_, cy_);

  {
    //ScopeTime time(">>> Bilateral, pyr-down-all, create-maps-all");
    //depth_raw.copyTo(depths_curr[0]);
    device::bilateralFilter (depth_raw, depths_curr_[0]);

    if (max_icp_distance_ > 0)
      device::truncateDepth(depths_curr_[0], max_icp_distance_);

    for (int i = 1; i < LEVELS; ++i)
      device::pyrDown (depths_curr_[i-1], depths_curr_[i]);

    for (int i = 0; i < LEVELS; ++i)
    {
      device::createVMap (intr(i), depths_curr_[i], vmaps_curr_[i]);
      //device::createNMap(vmaps_curr_[i], nmaps_curr_[i]);
      computeNormalsEigen (vmaps_curr_[i], nmaps_curr_[i]);
    }
    pcl::device::sync ();
  }

  //can't perform more on first frame
  if (global_time_ == 0)
  {
    Matrix3frm initial_cam_rot = rmats_[0]; // [Ri|ti] - pos of camera, i.e.
    Matrix3frm initial_cam_rot_inv = initial_cam_rot.inverse ();
    Vector3f   initial_cam_trans = tvecs_[0]; // transform from camera to global coo space for (i-1)th camera pose

    Mat33&  device_initial_cam_rot = device_cast<Mat33> (initial_cam_rot);
    Mat33&  device_initial_cam_rot_inv = device_cast<Mat33> (initial_cam_rot_inv);
    float3& device_initial_cam_trans = device_cast<float3>(initial_cam_trans);

    float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize());

    device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_initial_cam_rot_inv,
                                device_initial_cam_trans, tsdf_volume_->getTsdfTruncDist(),
                                tsdf_volume_->data(), getCyclicalBufferStructure (), depthRawScaled_);
    /*
    Matrix3frm init_Rcam = rmats_[0]; // [Ri|ti] - pos of camera, i.e.
    Vector3f   init_tcam = tvecs_[0]; // transform from camera to global coo space for (i-1)th camera pose

    Mat33&  device_Rcam = device_cast<Mat33> (init_Rcam);
    float3& device_tcam = device_cast<float3>(init_tcam);

    Matrix3frm init_Rcam_inv = init_Rcam.inverse ();
    Mat33&   device_Rcam_inv = device_cast<Mat33> (init_Rcam_inv);
    float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize ());

    //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam, tranc_dist, volume_);
    device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcam_inv, device_tcam,
                                tsdf_volume_->getTsdfTruncDist (), tsdf_volume_->data (),
                                getCyclicalBufferStructure (), depthRawScaled_);
    */
    for (int i = 0; i < LEVELS; ++i)
      device::tranformMaps (vmaps_curr_[i], nmaps_curr_[i], device_initial_cam_rot,
                            device_initial_cam_trans, vmaps_g_prev_[i], nmaps_g_prev_[i]);

    if (perform_last_scan_)
      finished_ = true;

    ++global_time_;
    return (false);
  }

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Iterative Closest Point

  // GET PREVIOUS GLOBAL TRANSFORM
  // Previous global rotation
  Matrix3frm cam_rot_global_prev = rmats_[global_time_ - 1];   // [Ri|ti] - pos of camera, i.e.
  // Previous global translation
  Vector3f   cam_trans_global_prev = tvecs_[global_time_ - 1]; // transform from camera to global coo space for (i-1)th camera pose
  // Previous global inverse rotation
  Matrix3frm cam_rot_global_prev_inv = cam_rot_global_prev.inverse (); // Rprev.t();

  // GET CURRENT GLOBAL TRANSFORM
  Matrix3frm cam_rot_global_curr = cam_rot_global_prev;     // transform to global coo for ith camera pose
  Vector3f   cam_trans_global_curr = cam_trans_global_prev;

  // CONVERT TO DEVICE TYPES
  // LOCAL PREVIOUS TRANSFORM
  Mat33&  device_cam_rot_local_prev_inv = device_cast<Mat33> (cam_rot_global_prev_inv);
  float3& device_cam_trans_local_prev_tmp = device_cast<float3> (cam_trans_global_prev);
  float3  device_cam_trans_local_prev;
  device_cam_trans_local_prev.x = device_cam_trans_local_prev_tmp.x - (getCyclicalBufferStructure ())->origin_metric.x;
  device_cam_trans_local_prev.y = device_cam_trans_local_prev_tmp.y - (getCyclicalBufferStructure ())->origin_metric.y;
  device_cam_trans_local_prev.z = device_cam_trans_local_prev_tmp.z - (getCyclicalBufferStructure ())->origin_metric.z;
  /*
  Matrix3frm Rprev = rmats_[global_time_ - 1]; // [Ri|ti] - pos of camera, i.e.
  Vector3f   tprev = tvecs_[global_time_ - 1]; // transform from camera to global coo space for (i-1)th camera pose
  Matrix3frm Rprev_inv = Rprev.inverse (); //Rprev.t();

  //Mat33& device_Rprev     = device_cast<Mat33> (Rprev);
  Mat33&  device_Rprev_inv = device_cast<Mat33> (Rprev_inv);
  float3& device_tprev     = device_cast<float3> (tprev);

  Matrix3frm Rcurr = Rprev; // transform to global coo for ith camera pose
  Vector3f   tcurr = tprev;
  */
  {
    //ScopeTime time("icp-all");
    for (int level_index = LEVELS-1; level_index >= 0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];

      // current maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];

      // previous maps
      MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
      MapArr& nmap_g_prev = nmaps_g_prev_[level_index];

      // We need to transform the maps from global to the local coordinates
      Mat33& rotation_id = device_cast<Mat33> (rmats_[0]); // Identity Rotation Matrix. Because we only need translation
      float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
      cube_origin.x = -cube_origin.x;
      cube_origin.y = -cube_origin.y;
      cube_origin.z = -cube_origin.z;

      MapArr& vmap_temp = vmap_g_prev;
      MapArr& nmap_temp = nmap_g_prev;
      device::tranformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmap_g_prev, nmap_g_prev);
      /*
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];
      //MapArr& vmap_g_curr = vmaps_g_curr_[level_index];
      //MapArr& nmap_g_curr = nmaps_g_curr_[level_index];
      MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
      MapArr& nmap_g_prev = nmaps_g_prev_[level_index];
      */
      //CorespMap& coresp = coresps_[level_index];

      for (int iter = 0; iter < iter_num; ++iter)
      {
        /*
        Mat33&  device_Rcurr = device_cast<Mat33> (Rcurr);
        float3& device_tcurr = device_cast<float3>(tcurr);
        */
        // CONVERT TO DEVICE TYPES
        // CURRENT LOCAL TRANSFORM
        Mat33& device_cam_rot_local_curr = device_cast<Mat33> (cam_rot_global_curr); /// We have not dealt with changes in rotations
        float3& device_cam_trans_local_curr_tmp = device_cast<float3> (cam_trans_global_curr);
        float3 device_cam_trans_local_curr;
        device_cam_trans_local_curr.x = device_cam_trans_local_curr_tmp.x - (getCyclicalBufferStructure ())->origin_metric.x;
        device_cam_trans_local_curr.y = device_cam_trans_local_curr_tmp.y - (getCyclicalBufferStructure ())->origin_metric.y;
        device_cam_trans_local_curr.z = device_cam_trans_local_curr_tmp.z - (getCyclicalBufferStructure ())->origin_metric.z;

        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;

        estimateCombined (device_cam_rot_local_curr, device_cam_trans_local_curr, vmap_curr, nmap_curr,
                          device_cam_rot_local_prev_inv, device_cam_trans_local_prev, intr (level_index),
                          vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());
        /*
        estimateCombined (device_Rcurr, device_tcurr, vmap_curr, nmap_curr, device_Rprev_inv, device_tprev,
                          intr (level_index), vmap_g_prev, nmap_g_prev, distThres_, angleThres_,
                          gbuf_, sumbuf_, A.data (), b.data ());
        */
        //checking nullspace
        double det = A.determinant ();

        if (fabs (det) < 1e-15 || pcl_isnan (det))
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;
          PCL_ERROR ("LOST...\n");
          reset ();
          return (false);
        }
        //float maxc = A.maxCoeff();

        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();
        //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

        float alpha = result (0);
        float beta  = result (1);
        float gamma = result (2);

        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) *
                                                               AngleAxisf (beta,  Vector3f::UnitY ()) *
                                                               AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose
        cam_trans_global_curr = cam_rot_incremental * cam_trans_global_curr + cam_trans_incremental;
        cam_rot_global_curr   = cam_rot_incremental * cam_rot_global_curr;
        /*
        tcurr = Rinc * tcurr + tinc;
        Rcurr = Rinc * Rcurr;
        */
      }
    }
  }
  //save transform
  rmats_.push_back (cam_rot_global_curr);
  tvecs_.push_back (cam_trans_global_curr);
  /*
  rmats_.push_back (Rcurr);
  tvecs_.push_back (tcurr);
  */

  //check for shift
  bool has_shifted = cyclical_.checkForShift(tsdf_volume_, getCameraPose (), 0.6 * volume_size_, true, perform_last_scan_);
  if (has_shifted)
    PCL_WARN ("SHIFTING\n");

  // get NEW local rotation
  Matrix3frm cam_rot_local_curr_inv = cam_rot_global_curr.inverse ();
  Mat33& device_cam_rot_local_curr_inv = device_cast<Mat33> (cam_rot_local_curr_inv);
  Mat33& device_cam_rot_local_curr = device_cast<Mat33> (cam_rot_global_curr);

  // get NEW local translation
  float3& device_cam_trans_local_curr_tmp = device_cast<float3> (cam_trans_global_curr);
  float3  device_cam_trans_local_curr;
  device_cam_trans_local_curr.x = device_cam_trans_local_curr_tmp.x - (getCyclicalBufferStructure ())->origin_metric.x;
  device_cam_trans_local_curr.y = device_cam_trans_local_curr_tmp.y - (getCyclicalBufferStructure ())->origin_metric.y;
  device_cam_trans_local_curr.z = device_cam_trans_local_curr_tmp.z - (getCyclicalBufferStructure ())->origin_metric.z;

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Integration check - We do not integrate volume if camera does not move.
  float rnorm = rodrigues2(cam_rot_global_curr.inverse() * cam_rot_global_prev).norm();
  float tnorm = (cam_trans_global_curr - cam_trans_global_prev).norm();
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_;
  //~ if (integrate)
  //~   std::cout << "\tCamera movement since previous frame was " << (rnorm + alpha * tnorm)/2 << " integrate is set to " << integrate << std::endl;
  //~ else
  //~   std::cout << "Camera movement since previous frame was " << (rnorm + alpha * tnorm)/2 << " integrate is set to " << integrate << std::endl;

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Volume integration
  float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());
  /*
  Matrix3frm Rcurr_inv = Rcurr.inverse ();
  Mat33&  device_Rcurr_inv = device_cast<Mat33> (Rcurr_inv);
  float3& device_tcurr = device_cast<float3> (tcurr);
  */
  if (integrate)
  {
    //integrateTsdfVolume(depth_raw, intr, device_volume_size, device_Rcurr_inv, device_tcurr, tranc_dist, volume_);
    integrateTsdfVolume (depth_raw, intr, device_volume_size, device_cam_rot_local_curr_inv,
                         device_cam_trans_local_curr, tsdf_volume_->getTsdfTruncDist (),
                         tsdf_volume_->data (), getCyclicalBufferStructure (), depthRawScaled_);
  }

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Ray casting
  /*Mat33& device_Rcurr = device_cast<Mat33> (Rcurr);*/
  {
    raycast (intr, device_cam_rot_local_curr, device_cam_trans_local_curr, tsdf_volume_->getTsdfTruncDist (),
             device_volume_size, tsdf_volume_->data (), getCyclicalBufferStructure (), vmaps_g_prev_[0], nmaps_g_prev_[0]);

    // POST-PROCESSING: We need to transform the newly raycasted maps into the global space.
    Mat33& rotation_id = device_cast<Mat33> (rmats_[0]); /// Identity Rotation Matrix. Because we only need translation
    float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
    //~ PCL_INFO ("Raycasting with cube origin at %f, %f, %f\n", cube_origin.x, cube_origin.y, cube_origin.z);
    MapArr& vmap_temp = vmaps_g_prev_[0];
    MapArr& nmap_temp = nmaps_g_prev_[0];
    device::tranformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmaps_g_prev_[0], nmaps_g_prev_[0]);

    for (int i = 1; i < LEVELS; ++i)
    {
      resizeVMap (vmaps_g_prev_[i-1], vmaps_g_prev_[i]);
      resizeNMap (nmaps_g_prev_[i-1], nmaps_g_prev_[i]);
    }
    pcl::device::sync ();
  }

  if (has_shifted && perform_last_scan_)
    extractAndMeshWorld ();

  ++global_time_;
  return (true);
}
template <typename T, int R, int C>
inline T determinant(const Eigen::Matrix<T,R,C>& m) {
  stan::math::check_square("determinant(%1%)", m, "m", (double*)0);
  return m.determinant();
}
inline bool
pcl::gpu::kinfuLS::KinfuTracker::performICP(const Intr& cam_intrinsics, Matrix3frm& previous_global_rotation,
                                            Vector3f& previous_global_translation, Matrix3frm& current_global_rotation,
                                            Vector3f& current_global_translation)
{
  if (disable_icp_)
  {
    lost_ = false;
    return (true);
  }

  // Compute inverse rotation
  Matrix3frm previous_global_rotation_inv = previous_global_rotation.inverse (); // Rprev.t();

  ///////////////////////////////////////////////
  // Convert pose to device type
  Mat33  device_cam_rot_local_prev_inv;
  float3 device_cam_trans_local_prev;
  convertTransforms(previous_global_rotation_inv, previous_global_translation,
                    device_cam_rot_local_prev_inv, device_cam_trans_local_prev);
  device_cam_trans_local_prev -= getCyclicalBufferStructure ()->origin_metric;

  // Use temporary pose, so that we modify the current global pose only if ICP converged
  Matrix3frm resulting_rotation;
  Vector3f   resulting_translation;

  // Initialize output pose to current pose
  current_global_rotation = previous_global_rotation;
  current_global_translation = previous_global_translation;

  ///////////////////////////////////////////////
  // Run ICP
  {
    //ScopeTime time("icp-all");
    for (int level_index = LEVELS-1; level_index >= 0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];

      // current vertex and normal maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];

      // previous vertex and normal maps
      MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
      MapArr& nmap_g_prev = nmaps_g_prev_[level_index];

      // We need to transform the maps from global to local coordinates
      Mat33& rotation_id = device_cast<Mat33> (rmats_[0]); // Identity Rotation Matrix. Because we only need translation
      float3 cube_origin = (getCyclicalBufferStructure ())->origin_metric;
      cube_origin = -cube_origin;

      MapArr& vmap_temp = vmap_g_prev;
      MapArr& nmap_temp = nmap_g_prev;
      transformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmap_g_prev, nmap_g_prev);

      // run ICP for iter_num iterations (return false when lost)
      for (int iter = 0; iter < iter_num; ++iter)
      {
        // CONVERT POSES TO DEVICE TYPES
        // CURRENT LOCAL POSE
        Mat33 device_current_rotation = device_cast<Mat33> (current_global_rotation); // We do not deal with changes in rotations
        float3 device_current_translation_local = device_cast<float3> (current_global_translation);
        device_current_translation_local -= getCyclicalBufferStructure ()->origin_metric;

        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;

        // call the ICP function (see paper by Kok-Lim Low "Linear Least-squares Optimization for Point-to-Plane ICP Surface Registration")
        estimateCombined (device_current_rotation, device_current_translation_local, vmap_curr, nmap_curr,
                          device_cam_rot_local_prev_inv, device_cam_trans_local_prev, cam_intrinsics (level_index),
                          vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());

        // checking nullspace
        double det = A.determinant ();

        if (fabs (det) < 1e-15 /*100000*/ || pcl_isnan (det)) //TODO find a threshold that makes ICP track well, but prevents it from generating wrong transforms
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;
          if (lost_ == false)
            PCL_ERROR ("ICP LOST... PLEASE COME BACK TO THE LAST VALID POSE (green)\n");
          //reset ();
          // GUI will now show the user that ICP is lost. User needs to press "R" to reset the volume
          lost_ = true;
          return (false);
        }

        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();

        float alpha = result (0);
        float beta  = result (1);
        float gamma = result (2);

        // deduce incremental rotation and translation from ICP's results
        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) *
                                                               AngleAxisf (beta,  Vector3f::UnitY ()) *
                                                               AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose global pose
        current_global_translation = cam_rot_incremental * current_global_translation + cam_trans_incremental;
        current_global_rotation = cam_rot_incremental * current_global_rotation;
      }
    }
  }
  // ICP has converged
  lost_ = false;
  return (true);
}
void SurfelMapPublisher::publishSurfelMarkers(const boost::shared_ptr<MapType>& map)
{
  if (m_markerPublisher.getNumSubscribers() == 0)
    return;

  unsigned int markerId = 0;
  std::vector<float> cellSizes;
  typename MapType::AlignedCellVectorVector occupiedCells;
  std::vector<std::vector<pcl::PointXYZ>> occupiedCellsCenters;

  int levels = map->getLevels();

  // delete the markers published for the previous map state
  for (unsigned int i = 0; i < m_lastSurfelMarkerCount; ++i)
  {
    for (int l = 0; l < levels; l++)
    {
      visualization_msgs::Marker marker;
      marker.header.frame_id = map->getFrameId();
      marker.header.stamp = map->getLastUpdateTimestamp();
      marker.ns = boost::lexical_cast<std::string>(l);
      marker.id = i;
      marker.type = marker.SPHERE;
      marker.action = marker.DELETE;
      m_markerPublisher.publish(marker);
    }
  }

  for (int l = 0; l < levels; l++)
  {
    cellSizes.push_back(map->getCellSize(l));

    typename MapType::AlignedCellVector occupiedCellsTemp;
    std::vector<pcl::PointXYZ> occupiedCellsCentersTemp;
    map->getOccupiedCells(occupiedCellsTemp, l);
    map->getOccupiedCells(occupiedCellsCentersTemp, l);
    occupiedCells.push_back(occupiedCellsTemp);
    occupiedCellsCenters.push_back(occupiedCellsCentersTemp);
  }

  for (unsigned int l = 0; l < cellSizes.size(); l++)
  {
    float cellSize = cellSizes[l];
    for (size_t i = 0; i < occupiedCells[l].size(); i++)
    {
      const mrs_laser_maps::Surfel& surfel = occupiedCells[l][i].surfel_;

      // if (surfel.num_points_ < 15)
      //   continue;
      //
      // if (surfel.unevaluated_) {
      //   ROS_ERROR("not unevaluated surfel");
      //   continue;
      // }

      // eigendecomposition of the 3x3 covariance gives the ellipsoid axes
      Eigen::Matrix<double, 3, 3> cov = surfel.cov_.block(0, 0, 3, 3);
      Eigen::EigenSolver<Eigen::Matrix3d> solver(cov);
      Eigen::Matrix<double, 3, 3> eigenvectors = solver.eigenvectors().real();
      Eigen::Matrix<double, 3, 1> eigenvalues = solver.eigenvalues().real();

      // double eigenvalues[3];
      // for (int j = 0; j < 3; ++j) {
      //   Eigen::Matrix<double, 3, 1> mult = cov * eigenvectors.col(j);
      //   eigenvalues[j] = mult(0,0) / eigenvectors.col(j)(0);
      // }

      // flip one axis if the eigenvector basis is left-handed, so it forms a proper rotation
      if (eigenvectors.determinant() < 0)
      {
        eigenvectors.col(0)(0) = -eigenvectors.col(0)(0);
        eigenvectors.col(0)(1) = -eigenvectors.col(0)(1);
        eigenvectors.col(0)(2) = -eigenvectors.col(0)(2);
      }

      Eigen::Quaternion<double> q = Eigen::Quaternion<double>(eigenvectors);

      visualization_msgs::Marker marker;
      marker.header.frame_id = map->getFrameId();
      marker.header.stamp = map->getLastUpdateTimestamp();
      marker.ns = boost::lexical_cast<std::string>(l);
      marker.id = markerId++;
      marker.type = marker.SPHERE;
      marker.action = marker.ADD;
      marker.pose.position.x = occupiedCellsCenters[l][i].x - cellSize / 2 + surfel.mean_(0);
      marker.pose.position.y = occupiedCellsCenters[l][i].y - cellSize / 2 + surfel.mean_(1);
      marker.pose.position.z = occupiedCellsCenters[l][i].z - cellSize / 2 + surfel.mean_(2);
      marker.pose.orientation.w = q.w();
      marker.pose.orientation.x = q.x();
      marker.pose.orientation.y = q.y();
      marker.pose.orientation.z = q.z();

      // scale along each principal axis, with a lower bound so the marker stays visible
      marker.scale.x = std::max(sqrt(eigenvalues[0]) * 3, 0.01);
      marker.scale.y = std::max(sqrt(eigenvalues[1]) * 3, 0.01);
      marker.scale.z = std::max(sqrt(eigenvalues[2]) * 3, 0.01);
      marker.color.a = 1.0;

      // color by the angle between the surfel normal and the vertical
      double dot = surfel.normal_.dot(Eigen::Vector3d(0., 0., 1.));
      if (surfel.normal_.norm() > 1e-10)
        dot /= surfel.normal_.norm();
      double angle = acos(fabs(dot));
      double angle_normalized = 2. * angle / M_PI;
      marker.color.r = ColorMapJet::red(angle_normalized); // fabs(surfel.normal_(0));
      marker.color.g = ColorMapJet::green(angle_normalized);
      marker.color.b = ColorMapJet::blue(angle_normalized);
      m_markerPublisher.publish(marker);
    }
  }
  m_lastSurfelMarkerCount = markerId;
}
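Each marker visualizes a surfel's covariance ellipsoid: with the eigendecomposition Sigma = V Lambda V^T, the sphere is rotated by V (made right-handed via the determinant check, so it converts to a valid quaternion) and scaled along principal axis i to

\[
\text{scale}_i = 3\sqrt{\lambda_i}
\]

i.e. three standard deviations of the point distribution along that axis.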
inline bool
pcl::gpu::kinfuLS::KinfuTracker::performPairWiseICP(const Intr cam_intrinsics, Matrix3frm& resulting_rotation,
                                                    Vector3f& resulting_translation)
{
  // we assume that both v and n maps are in the same coordinate space
  // initialize rotation and translation to respectively identity and zero
  Matrix3frm previous_rotation = Eigen::Matrix3f::Identity ();
  Matrix3frm previous_rotation_inv = previous_rotation.inverse ();
  Vector3f   previous_translation = Vector3f(0, 0, 0);

  ///////////////////////////////////////////////
  // Convert pose to device type
  Mat33  device_cam_rot_prev_inv;
  float3 device_cam_trans_prev;
  convertTransforms(previous_rotation_inv, previous_translation, device_cam_rot_prev_inv, device_cam_trans_prev);

  // Initialize output pose to current pose (i.e. identity and zero translation)
  Matrix3frm current_rotation = previous_rotation;
  Vector3f   current_translation = previous_translation;

  ///////////////////////////////////////////////
  // Run ICP
  {
    //ScopeTime time("icp-all");
    for (int level_index = LEVELS-1; level_index >= 0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];

      // current vertex and normal maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];

      // previous vertex and normal maps
      MapArr& vmap_prev = vmaps_prev_[level_index];
      MapArr& nmap_prev = nmaps_prev_[level_index];

      // no need to transform maps from global to local since they are both in camera coordinates

      // run ICP for iter_num iterations (return false when lost)
      for (int iter = 0; iter < iter_num; ++iter)
      {
        // CONVERT POSES TO DEVICE TYPES
        // CURRENT LOCAL POSE
        Mat33  device_current_rotation = device_cast<Mat33> (current_rotation);
        float3 device_current_translation_local = device_cast<float3> (current_translation);

        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;

        // call the ICP function (see paper by Kok-Lim Low "Linear Least-squares Optimization for Point-to-Plane ICP Surface Registration")
        estimateCombined (device_current_rotation, device_current_translation_local, vmap_curr, nmap_curr,
                          device_cam_rot_prev_inv, device_cam_trans_prev, cam_intrinsics (level_index),
                          vmap_prev, nmap_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data ());

        // checking nullspace
        double det = A.determinant ();

        if (fabs (det) < 1e-15 || pcl_isnan (det))
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;
          PCL_WARN ("ICP PairWise LOST...\n");
          //reset ();
          return (false);
        }

        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();

        float alpha = result (0);
        float beta  = result (1);
        float gamma = result (2);

        // deduce incremental rotation and translation from ICP's results
        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) *
                                                               AngleAxisf (beta,  Vector3f::UnitY ()) *
                                                               AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose global pose
        current_translation = cam_rot_incremental * current_translation + cam_trans_incremental;
        current_rotation = cam_rot_incremental * current_rotation;
      }
    }
  }

  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // since raw depth maps are quite noisy, we make sure the estimated transform is big enough to be taken into account
  float rnorm = rodrigues2(current_rotation).norm();
  float tnorm = (current_translation).norm();
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_ * 2.0f;

  if (integrate)
  {
    resulting_rotation = current_rotation;
    resulting_translation = current_translation;
  }
  else
  {
    resulting_rotation = Eigen::Matrix3f::Identity ();
    resulting_translation = Vector3f(0, 0, 0);
  }
  // ICP has converged
  return (true);
}
template <typename T, int R, int C>
inline T determinant(const Eigen::Matrix<T, R, C>& m) {
  check_square("determinant", "m", m);
  return m.determinant();
}
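The three wrappers above are successive revisions of the same validate-then-delegate pattern: reject non-square inputs, then forward to Eigen's determinant(). A minimal self-contained analogue, assuming only plain Eigen (the name checked_determinant is hypothetical):

#include <stdexcept>
#include <Eigen/Dense>

// reject non-square matrices before delegating to Eigen's determinant()
double checked_determinant(const Eigen::MatrixXd& m) {
  if (m.rows() != m.cols())
    throw std::invalid_argument("determinant: matrix m must be square");
  return m.determinant();
}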
bool MyPointCloud::alignPointClouds(std::vector<Matrix3frm>& Rcam, std::vector<Vector3f>& tcam,
                                    MyPointCloud *globalPreviousPointCloud, device::Intr& intrinsics, int globalTime)
{
    Matrix3frm Rprev = Rcam[globalTime - 1]; // [Ri|ti] - pos of camera, i.e.
    Vector3f   tprev = tcam[globalTime - 1]; // transform from camera to global coo space for (i-1)th camera pose
    Matrix3frm Rprev_inv = Rprev.inverse(); //Rprev.t();

    device::Mat33& device_Rprev_inv = device_cast<device::Mat33> (Rprev_inv);
    float3& device_tprev = device_cast<float3> (tprev);

    Matrix3frm Rcurr = Rprev; // transform to global coo for ith camera pose
    Vector3f   tcurr = tprev;

    // NOTE: the original code carried `#pragma omp parallel for` on both loops, but the
    // iterations are sequentially dependent (each one refines Rcurr/tcurr), so running
    // them in parallel is a data race; the pragmas are dropped here.
    for (int level = LEVELS - 1; level >= 0; --level)
    {
        int iterations = icpIterations_[level];

        for (int iteration = 0; iteration < iterations; ++iteration)
        {
            printf(" ICP level %d iteration %d", level, iteration);
            pcl::ScopeTime time("");

            device::Mat33& device_Rcurr = device_cast<device::Mat33> (Rcurr);
            float3& device_tcurr = device_cast<float3>(tcurr);

            Eigen::Matrix<float, 6, 6, Eigen::RowMajor> A;
            Eigen::Matrix<float, 6, 1> b;

            if (level == 2 && iteration == 0)
                error_.create(rows_ * 4, cols_);

            device::estimateCombined (device_Rcurr, device_tcurr, vmaps_[level], nmaps_[level],
                                      device_Rprev_inv, device_tprev, intrinsics (level),
                                      globalPreviousPointCloud->getVertexMaps()[level],
                                      globalPreviousPointCloud->getNormalMaps()[level],
                                      distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data (), error_);

            //checking nullspace
            float det = A.determinant ();

            if (fabs (det) < 1e-15 || !pcl::device::valid_host (det))
            {
                // printf("ICP failed at level %d, iteration %d (global time %d)\n", level, iteration, globalTime);
                return (false);
            }
            //else printf("ICP succeed at level %d, iteration %d (global time %d)\n", level, iteration, globalTime);

            Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b);
            //Eigen::Matrix<float, 6, 1> result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);

            float alpha = result (0);
            float beta  = result (1);
            float gamma = result (2);

            Eigen::Matrix3f Rinc = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) *
                                                    AngleAxisf (beta,  Vector3f::UnitY ()) *
                                                    AngleAxisf (alpha, Vector3f::UnitX ());
            Vector3f tinc = result.tail<3> ();

            //compose
            tcurr = Rinc * tcurr + tinc;
            Rcurr = Rinc * Rcurr;
        }
    }

    //save transform
    Rcam[globalTime] = Rcurr;
    tcam[globalTime] = tcurr;
    return (true);
}