// Compare an analytically computed gradient against a numerical estimate
// from TooN::numerical_gradient_with_errors; entries whose absolute
// difference exceeds 1e-4 are flagged with "!!!".
void debugDerivatives(const double *computed_grad, const K_OPTIM::ErrorFunction & function, const K_OPTIM::OptimizationParameter & pos) {
    TooN::Vector<> tmppos(function.numParameters());
    tmppos = TooN::Zeros;
    // Column 0 holds the numerical gradient, column 1 its error estimate
    TooN::Matrix<TooN::Dynamic, 2> mat = TooN::numerical_gradient_with_errors(ComposeWrapperErrorFunction(function, pos), tmppos);
    for (int i = 0; i < mat.num_rows(); ++i) {
        fprintf(stderr, "%i: %e (%f) <=> %e%s\n", i, mat(i, 0), mat(i, 1), computed_grad[i],
                (fabs(mat(i, 0) - computed_grad[i]) > 1e-4) ? " !!!" : "");
    }
}
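// A minimal standalone sketch of the same check, assuming a hypothetical
// QuadraticCost functor (not part of the original code) in place of the
// K_OPTIM wrapper. TooN's numerical_gradient_with_errors returns the gradient
// estimate in column 0 and an error estimate in column 1.
#include <cstdio>
#include <cmath>
#include <TooN/TooN.h>
#include <TooN/functions/derivatives.h>

struct QuadraticCost {
    // f(x) = x0^2 + 3*x1, with analytic gradient (2*x0, 3)
    double operator()(const TooN::Vector<2> & x) const {
        return x[0] * x[0] + 3 * x[1];
    }
};

void checkQuadraticGradient() {
    TooN::Vector<2> x = TooN::makeVector(1.0, 2.0);
    const double analytic[2] = { 2 * x[0], 3.0 };
    TooN::Matrix<2, 2> grad = TooN::numerical_gradient_with_errors(QuadraticCost(), x);
    for (int i = 0; i < 2; ++i) {
        fprintf(stderr, "%i: %e (%f) <=> %e%s\n", i, grad(i, 0), grad(i, 1), analytic[i],
                (fabs(grad(i, 0) - analytic[i]) > 1e-4) ? " !!!" : "");
    }
}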
// Computes the orientation mapping (e1,e2,e3) -> (a, (a^b)^a, a^b), i.e. the
// second basis vector is chosen so that b lies in the plane spanned by the
// first two columns of the rotation.
static inline TooN::SO3<> canonicalOrientation( const TooN::Vector<3> & a, const TooN::Vector<3> & b ){
    TooN::Vector<3> n = a ^ b;
    if(norm_sq(n) < 1e-30)
        return TooN::SO3<>(); // a and b (nearly) parallel: no unique orientation, return identity
    TooN::Matrix<3> result;
    result.T()[0] = unit(a);
    result.T()[2] = unit(n);
    result.T()[1] = result.T()[2] ^ result.T()[0];
    return TooN::SO3<>(result);
}
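// Usage sketch (the vectors are illustrative values, not from the original
// code): the returned rotation maps e1 onto the direction of a.
#include <iostream>

void canonicalOrientationExample() {
    TooN::Vector<3> a = TooN::makeVector(0.0, 0.0, 1.0);
    TooN::Vector<3> b = TooN::makeVector(1.0, 0.0, 1.0);
    TooN::SO3<> R = canonicalOrientation(a, b);
    // The first column of R is unit(a), so rotating e1 recovers a's direction:
    std::cout << R * TooN::makeVector(1.0, 0.0, 0.0) << std::endl; // ~ (0 0 1)
}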
void ofxCalibImage::guessInitialPose(ofxATANCamera &Camera){
    // First, find a homography which maps the grid to the unprojected image coords.
    // Use the standard SVD null-space (DLT) method to find the 9 homography
    // parameters (c.f. appendix of thesis).
    int nPoints = static_cast<int>(mvGridCorners.size());
    TooN::Matrix<> m2Nx9(2*nPoints, 9);
    for(int n=0; n<nPoints; n++){
        // First, un-project the points to the image plane
        TooN::Vector<2> v2UnProj = Camera.UnProject(mvGridCorners[n].Params.v2Pos);
        double u = v2UnProj[0];
        double v = v2UnProj[1];
        // Then fill in the matrix..
        double x = mvGridCorners[n].irGridPos.x;
        double y = mvGridCorners[n].irGridPos.y;
        m2Nx9[n*2+0][0] = x;    m2Nx9[n*2+0][1] = y;    m2Nx9[n*2+0][2] = 1;
        m2Nx9[n*2+0][3] = 0;    m2Nx9[n*2+0][4] = 0;    m2Nx9[n*2+0][5] = 0;
        m2Nx9[n*2+0][6] = -x*u; m2Nx9[n*2+0][7] = -y*u; m2Nx9[n*2+0][8] = -u;
        m2Nx9[n*2+1][0] = 0;    m2Nx9[n*2+1][1] = 0;    m2Nx9[n*2+1][2] = 0;
        m2Nx9[n*2+1][3] = x;    m2Nx9[n*2+1][4] = y;    m2Nx9[n*2+1][5] = 1;
        m2Nx9[n*2+1][6] = -x*v; m2Nx9[n*2+1][7] = -y*v; m2Nx9[n*2+1][8] = -v;
    }

    // The right null-space (should only be one) of the matrix gives the homography...
    TooN::SVD<> svdHomography(m2Nx9);
    TooN::Vector<9> vH = svdHomography.get_VT()[8];
    TooN::Matrix<3> m3Homography;
    m3Homography[0] = vH.slice<0,3>();
    m3Homography[1] = vH.slice<3,3>();
    m3Homography[2] = vH.slice<6,3>();

    // Fix up possibly poorly conditioned bits of the homography
    {
        TooN::SVD<2> svdTopLeftBit(m3Homography.slice<0,0,2,2>());
        TooN::Vector<2> v2Diagonal = svdTopLeftBit.get_diagonal();
        m3Homography = m3Homography / v2Diagonal[0];
        v2Diagonal = v2Diagonal / v2Diagonal[0];
        double dLambda2 = v2Diagonal[1];
        TooN::Vector<2> v2b; // This is one hypothesis for v2b; the other is its negative.
        v2b[0] = 0.0;
        v2b[1] = sqrt(1.0 - (dLambda2 * dLambda2));
        TooN::Vector<2> v2aprime = v2b * svdTopLeftBit.get_VT();
        TooN::Vector<2> v2a = m3Homography[2].slice<0,2>();
        double dDotProd = v2a * v2aprime;
        if(dDotProd > 0)
            m3Homography[2].slice<0,2>() = v2aprime;
        else
            m3Homography[2].slice<0,2>() = -v2aprime;
    }

    // OK, now turn the homography into something 3D via simple Gram-Schmidt
    // ortho-normalization: take the 3x3 matrix H with columns [a b t],
    // orthonormalize a and b, add a^b as the third rotation column, and keep
    // t as the translation.
    TooN::Matrix<3> mRotation;
    TooN::Vector<3> vTranslation;
    double dMag1 = sqrt(m3Homography.T()[0] * m3Homography.T()[0]);
    m3Homography = m3Homography / dMag1;
    mRotation.T()[0] = m3Homography.T()[0];
    // All components of the first vector are removed from the second...
    mRotation.T()[1] = m3Homography.T()[1] - m3Homography.T()[0] * (m3Homography.T()[0] * m3Homography.T()[1]);
    mRotation.T()[1] /= sqrt(mRotation.T()[1] * mRotation.T()[1]);
    mRotation.T()[2] = mRotation.T()[0] ^ mRotation.T()[1];
    vTranslation = m3Homography.T()[2];

    // Store result
    mse3CamFromWorld.get_rotation() = mRotation;
    mse3CamFromWorld.get_translation() = vTranslation;
}
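// A small self-contained note on the DLT rows built above (hypothetical
// helper, not part of the original code): for p = (x, y, 1) and homography
// rows H[0], H[1], H[2], the two rows per correspondence encode
//   H[0].p - u*(H[2].p) = 0   and   H[1].p - v*(H[2].p) = 0.
double homographyRowResidual(const TooN::Matrix<3> & H, double x, double y, double u, double v) {
    TooN::Vector<3> p = TooN::makeVector(x, y, 1.0);
    double r1 = H[0] * p - u * (H[2] * p); // residual of the even (u) row
    double r2 = H[1] * p - v * (H[2] * p); // residual of the odd (v) row
    return fabs(r1) + fabs(r2);            // ~0 for a noise-free correspondence
}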
// Publish the current tracker pose
void SystemFrontendBase::PublishPose() {
    ROS_ASSERT(mpTracker);
    if (mpTracker->GetTrackingQuality() == Tracker::NONE || mpTracker->IsLost())
        return;

    geometry_msgs::PoseWithCovarianceStamped pose_cov_msg;
    pose_cov_msg.header.stamp = mpTracker->GetCurrentTimestamp();
    pose_cov_msg.header.frame_id = "vision_world";

    TooN::SE3<> pose = mpTracker->GetCurrentPose().inverse();
    TooN::Matrix<3> rot = pose.get_rotation().get_matrix();
    TooN::Matrix<6> cov = mpTracker->GetCurrentCovariance();

    // Clear cross-correlation
    cov.slice<0, 3, 3, 3>() = TooN::Zeros;
    cov.slice<3, 0, 3, 3>() = TooN::Zeros;

    // Change covariance matrix frame from camera to world
    cov.slice<0, 0, 3, 3>() = rot * cov.slice<0, 0, 3, 3>() * rot.T();
    cov.slice<3, 3, 3, 3>() = rot * cov.slice<3, 3, 3, 3>() * rot.T();

    // Some hacky heuristics here
    if (mpTracker->GetTrackingQuality() == Tracker::GOOD) {
        cov = cov * 1e2;
    } else if (mpTracker->GetTrackingQuality() == Tracker::DODGY) {
        cov = cov * 1e5;
    } else if (mpTracker->GetTrackingQuality() == Tracker::BAD) {
        cov = cov * 1e8;
    }

    pose_cov_msg.pose.pose = util::SE3ToPoseMsg(pose);

    int numElems = 6;
    for (int i = 0; i < numElems; ++i) {
        for (int j = 0; j < numElems; ++j) {
            pose_cov_msg.pose.covariance[i * numElems + j] = cov(i, j);
        }
    }
    mPoseWithCovPub.publish(pose_cov_msg);

    static tf::TransformBroadcaster br;
    tf::Transform transform;
    tf::poseMsgToTF(pose_cov_msg.pose.pose, transform);
    br.sendTransform(tf::StampedTransform(transform, mpTracker->GetCurrentTimestamp(), "vision_world", "vision_pose"));

    SE3Map mPoses = mpTracker->GetCurrentCameraPoses();
    geometry_msgs::PoseArray pose_array_msg;
    pose_array_msg.header.stamp = pose_cov_msg.header.stamp;
    pose_array_msg.header.frame_id = pose_cov_msg.header.frame_id;
    pose_array_msg.poses.resize(1 + mPoses.size());
    pose_array_msg.poses[0] = pose_cov_msg.pose.pose;

    int i = 1;
    for (SE3Map::iterator it = mPoses.begin(); it != mPoses.end(); ++it, ++i) {
        pose_array_msg.poses[i] = util::SE3ToPoseMsg(it->second.inverse());
        tf::poseMsgToTF(pose_array_msg.poses[i], transform);
        br.sendTransform(tf::StampedTransform(transform, mpTracker->GetCurrentTimestamp(), "vision_world", it->first));
    }
    mPoseArrayPub.publish(pose_array_msg);
}
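// Sketch of the frame change applied above (standalone helper with a
// hypothetical name, not part of the original code): a 3x3 covariance block C
// expressed in camera coordinates transforms to world coordinates as
// R * C * R^T, applied separately to the translation and rotation blocks once
// their cross-correlation has been zeroed.
TooN::Matrix<3> rotateCovarianceBlock(const TooN::SO3<> & rotCamToWorld, const TooN::Matrix<3> & covCam) {
    const TooN::Matrix<3> R = rotCamToWorld.get_matrix();
    return R * covCam * R.T();
}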