void EKFOA::process(const double delta_t, cv::Mat & frame, Eigen::Vector3d & rW, Eigen::Vector4d & qWR, Eigen::Matrix3d & axes_orientation_and_confidence, std::vector<Point3d> (& XYZs)[3], Delaunay & triangulation, Point3d & closest_point){
	double time_total;
	std::vector<cv::Point2f> features_to_add;
	std::vector<Features_extra> features_extra;

	/*
	 * EKF prediction (state and measurement prediction)
	 */
	time_total = (double)cv::getTickCount();
	double time_prediction = (double)cv::getTickCount();
	filter.predict_state_and_covariance(delta_t);
	filter.compute_features_h(cam, features_extra);
	time_prediction = (double)cv::getTickCount() - time_prediction;
//	std::cout << "predict = " << time_prediction/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	/*
	 * Sense and map management (delete features from EKF)
	 */
	double time_tracker = (double)cv::getTickCount();
	motion_tracker.process(frame, features_extra, features_to_add); //TODO: Why is optical flow returning points outside the image???
	time_tracker = (double)cv::getTickCount() - time_tracker;
	time_total = time_total + time_tracker; //do not count the time spent by the tracker (shifting the start time forward excludes it from the total)

	//Delete no longer seen features from the state, the covariance matrix and features_extra:
	double time_del = (double)cv::getTickCount();
	filter.delete_features(features_extra);
	time_del = (double)cv::getTickCount() - time_del;
//	std::cout << "delete = " << time_del/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	/*
	 * EKF update step and map management (add new features to EKF)
	 */
	double time_update = (double)cv::getTickCount();
	filter.update(cam, features_extra);
	time_update = (double)cv::getTickCount() - time_update;
//	std::cout << "update = " << time_update/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	//Add new features
	double time_add = (double)cv::getTickCount();
	filter.add_features_inverse_depth(cam, features_to_add);
	time_add = (double)cv::getTickCount() - time_add;
//	std::cout << "add_fea = " << time_add/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	/*
	 * Triangulation, surface and GUI data setting:
	 */
	double time_triangulation = (double)cv::getTickCount();
	std::vector< std::pair<Point2d, size_t> > triangle_list;
	std::list<Triangle> triangles_list_3d;

	const Eigen::VectorXd & x_k_k = filter.x_k_k();
	const Eigen::MatrixXd & p_k_k = filter.p_k_k();

	//Set the position, so the GUI can draw it:
	rW = x_k_k.segment<3>(0); //current position

	//Set the axes orientation and confidence:
	axes_orientation_and_confidence.setIdentity(); //each column stores one axis (X, Y, Z)
	axes_orientation_and_confidence *= 5; //make the lines longer, so they are actually informative

	//Apply the rotation matrix:
	Eigen::Matrix3d qWR_R; //rotation matrix of the current orientation quaternion
	qWR = x_k_k.segment<4>(3);
	MotionModel::quaternion_matrix(qWR, qWR_R);
	axes_orientation_and_confidence.applyOnTheLeft(qWR_R); // == qWR_R * axes_orientation_and_confidence

	for (int axis=0 ; axis<axes_orientation_and_confidence.cols() ; axis++){
		//Set the length to be 3*sigma:
		axes_orientation_and_confidence.col(axis) *= 3*std::sqrt(p_k_k(axis, axis)); //the first 3 diagonal entries of the covariance matrix give the position confidence

		//Translate the origin:
		axes_orientation_and_confidence.col(axis) += rW;
	}

	int num_features = (x_k_k.rows()-13)/6;
	XYZs[0].resize(num_features);
	XYZs[1].resize(num_features);
	XYZs[2].resize(num_features);

	//Compute the 3d positions and inverse depth variances of all the points in the state
	int i=0; //feature counter
	for (int start_feature=13 ; start_feature<x_k_k.rows() ; start_feature+=6){
		const int feature_inv_depth_index = start_feature + 5;

		//As with any normal distribution, nearly all (99.73%) of the possible depths lie within three standard deviations of the mean
		const double sigma_3 = std::sqrt(p_k_k(feature_inv_depth_index, feature_inv_depth_index)); //sqrt(inverse depth variance)

		const Eigen::VectorXd & yi = x_k_k.segment(start_feature, 6);
		Eigen::VectorXd point_close(x_k_k.segment(start_feature, 6));
		Eigen::VectorXd point_far(x_k_k.segment(start_feature, 6));

		//Change the inverse depth of the feature copies, so that the range between -3*sigma and +3*sigma can be represented:
		point_close(5) += sigma_3;
		point_far(5) -= sigma_3;

		Eigen::Vector3d XYZ_mu = Feature::compute_cartesian(yi); //mu (mean)
		Eigen::Vector3d XYZ_close = Feature::compute_cartesian(point_close); //mean + 3*sigma in inverse depth, i.e. the closest point (the sign flips because depth is stored inverted)
		Eigen::Vector3d XYZ_far = Feature::compute_cartesian(point_far); //mean - 3*sigma in inverse depth, i.e. the farthest point

		//The center of the model is ALWAYS the current position of the camera/robot, so the current orientation (R_inv) and translation (rWC = x_k_k.head(3)) have to be 'cancelled'.
		//Note: it would be nicer to do this in the GUI class, as it is only a presentation/perspective change, but due to the structure it was easier to do it here.
		XYZs[0][i] = Point3d(XYZ_mu(0), XYZ_mu(1), XYZ_mu(2)); //mean
		XYZs[1][i] = Point3d(XYZ_close(0), XYZ_close(1), XYZ_close(2)); //mean + 3*sigma (closest)
		XYZs[2][i] = Point3d(XYZ_far(0), XYZ_far(1), XYZ_far(2)); //mean - 3*sigma (farthest)

		//If the depth interval that contains 99.73% of the inverse depth distribution is smaller than the current depth estimate, the feature is well localised, so add it to the surface:
		const double size_sigma_3 = std::abs(1.0/(x_k_k(feature_inv_depth_index)-sigma_3) - 1.0/(x_k_k(feature_inv_depth_index)+sigma_3));
		if (size_sigma_3 < 1/x_k_k(feature_inv_depth_index)){
			triangle_list.push_back(std::make_pair(Point2d(features_extra[i].z(0), features_extra[i].z(1)), i));
		}

		if (x_k_k(feature_inv_depth_index) < 0){
			std::cout << "feature behind the camera!!! : idx=" << i << ", value=" << x_k_k(feature_inv_depth_index) << std::endl;
		}

		i++;
	}

	triangulation.insert(triangle_list.begin(), triangle_list.end());

	cv::Scalar delaunay_color = cv::Scalar(255, 0, 0); //blue
	for (Delaunay::Finite_faces_iterator fit = triangulation.finite_faces_begin(); fit != triangulation.finite_faces_end(); ++fit){
		const Delaunay::Face_handle & face = fit;

		//face->vertex(i)->info() == index of the point in the observation list.
		line(frame, features_extra[face->vertex(0)->info()].z_cv, features_extra[face->vertex(1)->info()].z_cv, delaunay_color, 1);
		line(frame, features_extra[face->vertex(1)->info()].z_cv, features_extra[face->vertex(2)->info()].z_cv, delaunay_color, 1);
		line(frame, features_extra[face->vertex(2)->info()].z_cv, features_extra[face->vertex(0)->info()].z_cv, delaunay_color, 1);

		//Add the face of the linked 3d points of this 2d triangle:
		triangles_list_3d.push_back(Triangle(XYZs[1][face->vertex(0)->info()], XYZs[1][face->vertex(1)->info()], XYZs[1][face->vertex(2)->info()])); //XYZs[1] == close
	}

	//Construct the AABB tree
	Tree tree(triangles_list_3d.begin(), triangles_list_3d.end());

	if (tree.size() > 0){
		//Compute the closest point (and optionally the squared distance) to the surface
		Point3d point_query(rW[0], rW[1], rW[2]);
		closest_point = tree.closest_point(point_query);

//		FT sqd = tree.squared_distance(point_query);
		Eigen::Vector3d last_displacement_vector = last_position - rW;
//		double repelling_force = 0;
//		if (std::sqrt(sqd) < 0.2){
//			std::cout << "can crash! " << std::endl;
//			repelling_force = 1/std::sqrt(sqd);
//		}
//		std::cout << "distance = [distance, " << std::sqrt(sqd) << "];" << std::endl;
//		std::cout << "repelling_force = [repelling_force, " << repelling_force << "];" << std::endl;
	}

	//Remember this position
	last_position = rW;

//	std::cout << "certainty = " << p_k_k.diagonal().sum() << std::endl;

	time_triangulation = (double)cv::getTickCount() - time_triangulation;
//	std::cout << "Triang  = " << time_triangulation/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	time_total = (double)cv::getTickCount() - time_total;
//	std::cout << "EKF     = " << time_total/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;
//	std::cout << "tracker = " << time_tracker/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;
}
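/*
 * Usage sketch (illustrative only, not part of the original source): a minimal capture
 * loop that drives EKFOA::process(). The header name, the default-constructible EKFOA
 * and the cv::VideoCapture source are assumptions made for this example; the Point3d
 * and Delaunay typedefs are expected to come from the project's headers.
 */
#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include "EKFOA.hpp" //assumed header name

int run_ekfoa_demo(){
	EKFOA ekfoa; //assumed: default-constructible with built-in camera/filter configuration
	cv::VideoCapture capture(0);
	if (!capture.isOpened())
		return 1;

	const double delta_t = 1.0/30.0; //assumed frame rate of the source

	cv::Mat frame;
	while (capture.read(frame)){
		Eigen::Vector3d rW;            //camera position (world frame)
		Eigen::Vector4d qWR;           //camera orientation quaternion
		Eigen::Matrix3d axes;          //per-axis orientation scaled by 3*sigma confidence
		std::vector<Point3d> XYZs[3];  //feature positions: mean / close (+3 sigma) / far (-3 sigma)
		Delaunay triangulation;        //fresh triangulation every frame
		Point3d closest_point;         //closest surface point to the camera

		ekfoa.process(delta_t, frame, rW, qWR, axes, XYZs, triangulation, closest_point);

		cv::imshow("EKFOA", frame); //frame now carries the Delaunay overlay drawn by process()
		if (cv::waitKey(1) == 27)
			break;
	}
	return 0;
}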
void
opengv::absolute_pose::modules::gp3p_main(
    const Eigen::Matrix3d & f,
    const Eigen::Matrix3d & v,
    const Eigen::Matrix3d & p,
    transformations_t & solutions)
{
  //Build and reduce the Groebner-basis matrix of the gP3P polynomial system
  Eigen::Matrix<double,48,85> groebnerMatrix = Eigen::Matrix<double,48,85>::Zero();
  gp3p::init(groebnerMatrix,f,v,p);
  gp3p::compute(groebnerMatrix);

  //Assemble the 8x8 action matrix; its eigenvectors encode the solutions
  Eigen::Matrix<double,8,8> M = Eigen::Matrix<double,8,8>::Zero();
  M.block<6,8>(0,0) = -groebnerMatrix.block<6,8>(36,77);
  M(6,0) = 1.0;
  M(7,6) = 1.0;

  Eigen::ComplexEigenSolver< Eigen::Matrix<double,8,8> > Eig(M,true);
  Eigen::Matrix<std::complex<double>,8,1> D = Eig.eigenvalues();
  Eigen::Matrix<std::complex<double>,8,8> V = Eig.eigenvectors();

  for( int c = 0; c < V.cols(); c++ )
  {
    std::complex<double> eigValue = D[c];

    if( eigValue.imag() < 0.0001 )
    {
      //Recover the Cayley rotation parameters and the three point depths from the eigenvector
      cayley_t cayley;
      Eigen::Vector3d n;

      for(size_t i = 0; i < 3; i++)
      {
        std::complex<double> cay = V(i+4,c)/V(7,c);
        cayley[2-i] = cay.real();
        std::complex<double> depth = V(i+1,c)/V(7,c);
        n[2-i] = depth.real();
      }

      rotation_t rotation = math::cayley2rot(cayley);

      //the groebner problem was set up to find the transpose!
      rotation.transposeInPlace();

      //Recover the translation by aligning the centroids of the reconstructed camera-frame points and the world points
      point_t center_cam = Eigen::Vector3d::Zero();
      point_t center_world = Eigen::Vector3d::Zero();
      for( size_t i = 0; i < (size_t) f.cols(); i++ )
      {
        point_t temp = rotation*(n[i]*f.col(i)+v.col(i));
        center_cam = center_cam + temp;
        center_world = center_world + p.col(i);
      }

      center_cam = center_cam/f.cols();
      center_world = center_world/f.cols();
      translation_t translation = center_world - center_cam;

      transformation_t transformation;
      transformation.block<3,3>(0,0) = rotation;
      transformation.col(3) = translation;
      solutions.push_back(transformation);
    }
  }
}
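/*
 * Usage sketch (illustrative only, not part of OpenGV): calls gp3p_main directly with
 * synthetic data for three correspondences. Column i of f/v/p holds, respectively, the
 * unit bearing vector, the camera offset inside the viewpoint and the world point of
 * correspondence i, following the model p_i = R*(n_i*f_i + v_i) + t used above. The data
 * is generated with R = I and t = 0, so one of the returned candidates should be close
 * to [I | 0]. The header path is an assumption; inside the library this routine is
 * normally reached through opengv::absolute_pose and an adapter rather than called
 * directly.
 */
#include <opengv/absolute_pose/modules/main.hpp> //assumed header declaring gp3p_main
#include <iostream>

int gp3p_demo()
{
  Eigen::Matrix3d v; //camera offsets, one per correspondence
  v.col(0) = Eigen::Vector3d( 0.1, 0.0, 0.0);
  v.col(1) = Eigen::Vector3d(-0.1, 0.0, 0.0);
  v.col(2) = Eigen::Vector3d( 0.0, 0.1, 0.0);

  Eigen::Matrix3d p; //world points
  p.col(0) = Eigen::Vector3d( 1.0,  0.0, 5.0);
  p.col(1) = Eigen::Vector3d( 0.0,  1.0, 4.0);
  p.col(2) = Eigen::Vector3d(-1.0, -1.0, 6.0);

  Eigen::Matrix3d f; //unit bearing vectors; with R = I and t = 0 this is just p - v, normalized
  for( int i = 0; i < 3; i++ )
    f.col(i) = (p.col(i) - v.col(i)).normalized();

  opengv::transformations_t solutions;
  opengv::absolute_pose::modules::gp3p_main(f, v, p, solutions);

  for( size_t s = 0; s < solutions.size(); s++ )
    std::cout << "candidate " << s << ":\n" << solutions[s] << "\n\n";
  return 0;
}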