Example #1
    // For the EKF we publish Tcw (world-to-camera) together with its covariance
    void publishPosewithCovariance(ros::Time stamp)
    {
//        SE3<> camPose = tracker_->GetCurrentPose();//Tcw
        SE3<> camPose = camPose4pub;//Tcw

        if (true) {
            geometry_msgs::PoseWithCovarianceStampedPtr pose(new geometry_msgs::PoseWithCovarianceStamped);//Tcw
            pose->header.stamp = stamp;
            pose->header.frame_id = "/ptam_world_cov";

            pose->pose.pose.position.x = camPose.get_translation()[0];
            pose->pose.pose.position.y = camPose.get_translation()[1];
            pose->pose.pose.position.z = camPose.get_translation()[2];
            quat_from_so3(pose->pose.pose.orientation, camPose.get_rotation());

            // Publish the square roots of the absolute covariance entries
            // (covar is traversed column by column into the flat array).
            TooN::Matrix<6> covar = tracker_->GetPoseCovariance();
            for (unsigned int i = 0; i < pose->pose.covariance.size(); i++){
                pose->pose.covariance[i] = sqrt(fabs(covar[i % 6][i / 6]));
            }
            // Element [1] is reused to carry the keyframe scene-depth median.
            pose->pose.covariance[1] = tracker_->GetCurrentKeyFrame().dSceneDepthMedian;

//            cout << "PTAM pose, Tcw: " << endl;
//            cout << pose->pose.pose.position.x << ", " << pose->pose.pose.position.y << ", " << pose->pose.pose.position.z << endl;
//            cout << pose->pose.pose.orientation.w << ", " << pose->pose.pose.orientation.x << ", "
//                 << pose->pose.pose.orientation.y << ", " << pose->pose.pose.orientation.z << endl;

            SE3<> roboPose = se3IMUfromcam * camPose;//Twi = (Tic * Tcw).inv
            roboPose = roboPose.inverse();
            cout << "POSITION DIFF: "<< PoseEKF_wi.get_translation()[0]-roboPose.get_translation()[0] << ", " <<
                    PoseEKF_wi.get_translation()[1] - roboPose.get_translation()[1]<< ", " <<
                    PoseEKF_wi.get_translation()[2] - roboPose.get_translation()[2] << endl;
            geometry_msgs::Quaternion quatRobo;
            quat_from_so3(quatRobo, roboPose.get_rotation());//Riw
//            cout << "PTAM pose, Twi: " << endl;
//            cout << roboPose.get_translation()[0] << ", " << roboPose.get_translation()[1] << ", " << roboPose.get_translation()[2] << endl;
//            cout << quatRobo.w << ", " << quatRobo.x << ", "
//                 << quatRobo.y << ", " << quatRobo.z << endl;

            cam_posewithcov_pub_.publish(pose);

            geometry_msgs::TransformStampedPtr quadpose(new geometry_msgs::TransformStamped);// for ros nodelets, publish ptr.
            quadpose->header.stamp = stamp;
            quadpose->header.frame_id = "/ptam_world";
            quadpose->transform.translation.x = roboPose.get_translation()[0];
            quadpose->transform.translation.y = roboPose.get_translation()[1];
            quadpose->transform.translation.z = roboPose.get_translation()[2];
            quat_from_so3(quadpose->transform.rotation, roboPose.get_rotation());
            quad_pose_pub_.publish(quadpose);

            logPose(stamp, roboPose);
        }
    }
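For context, here is a minimal sketch of a node that could consume the pose published above. It assumes ROS1; the topic name "vslam/pose_cov" and the node name are hypothetical, since the topic bound to cam_posewithcov_pub_ is not shown in the example. The only detail taken from the code above is that covariance element [1] is overwritten with the keyframe scene-depth median, so a consumer must not read it as a covariance term.

#include <ros/ros.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>

// Callback: the message carries Tcw plus sqrt(|covariance|) entries,
// except element [1], which holds the scene-depth median.
void poseCovCallback(const geometry_msgs::PoseWithCovarianceStamped::ConstPtr& msg)
{
    const double x = msg->pose.pose.position.x;
    const double y = msg->pose.pose.position.y;
    const double z = msg->pose.pose.position.z;
    const double sceneDepthMedian = msg->pose.covariance[1];

    ROS_INFO("Tcw position: %f %f %f, scene depth median: %f",
             x, y, z, sceneDepthMedian);
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "pose_cov_listener");   // node name is hypothetical
    ros::NodeHandle nh;
    ros::Subscriber sub = nh.subscribe("vslam/pose_cov", 10, poseCovCallback);
    ros::spin();
    return 0;
}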
Example #2
    // Build an initial Tcw from the IMU roll/pitch attitude
    SE3<> imu2camWorld(pximu::AttitudeData attitude_data){
        Matrix<3> Rroll =  Data(1.0, 0, 0,//roll Rot(-roll)T=Rot(roll)
                              0, cos(attitude_data.roll), -sin(attitude_data.roll),
                              0, sin(attitude_data.roll), cos(attitude_data.roll));
        Matrix<3> Rpitch = Data(cos(attitude_data.pitch), 0, -sin(attitude_data.pitch),//pitch Rot(-pitch)T=Rot(pitch)
                              0, 1.0, 0,
                              sin(attitude_data.pitch), 0, cos(attitude_data.pitch));

        Matrix<3> roll = Data(1.0, 0, 0,   // Rww1: roll and pitch are given in a world
                              0, -1, 0,    // frame whose z-axis points downward, so flip
                              0, 0, -1);   // the y and z axes.

        SE3<> camWorld = SE3<>();
        Matrix<3> rotation = roll * Rpitch * Rroll; //Rwr*Rrc-->Rwc
        camWorld.get_rotation() = rotation*se3IMUfromcam.get_rotation().get_matrix();

        Vector<3> twr = makeVector(0.0, 0.0, 0.198);// twc = twr + Rwr * trc
        Vector<3> twc = twr + rotation * se3IMUfromcam.get_translation();
        camWorld.get_translation()[0] = 0.0;//twc[0]; //twc
        camWorld.get_translation()[1] = 0.0;//twc[1]; // TODO: find out why zero has to be used here
        camWorld.get_translation()[2] = twc[2];

        camWorld = camWorld.inverse();//Tcw

        cout<< "TCW INITIALIZED FROM IMU ATT. TWC: " << twc[0]<< ", " << twc[1]<< ", " << twc[2]<<endl;
        return camWorld;
    }
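As a quick sanity check of the convention above, the following standalone sketch (assuming only TooN is available; the zero roll/pitch values are hypothetical) composes the same three rotations and prints the result, which for zero attitude should reduce to the axis-flip matrix Rww1.

#include <TooN/TooN.h>
#include <cmath>
#include <iostream>

int main()
{
    using namespace TooN;
    const double roll = 0.0, pitch = 0.0;   // hypothetical attitude in radians

    Matrix<3> Rroll  = Data(1.0, 0.0, 0.0,
                            0.0, std::cos(roll), -std::sin(roll),
                            0.0, std::sin(roll),  std::cos(roll));
    Matrix<3> Rpitch = Data(std::cos(pitch), 0.0, -std::sin(pitch),
                            0.0, 1.0, 0.0,
                            std::sin(pitch), 0.0, std::cos(pitch));
    // Rww1 flips the y and z axes: roll/pitch are given in a world frame
    // whose z-axis points downward.
    Matrix<3> Rww1   = Data(1.0,  0.0,  0.0,
                            0.0, -1.0,  0.0,
                            0.0,  0.0, -1.0);

    Matrix<3> Rwr = Rww1 * Rpitch * Rroll;   // same composition as roll * Rpitch * Rroll above
    std::cout << "Rwr for zero roll/pitch (should equal Rww1):\n" << Rwr << std::endl;
    return 0;
}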
Example #3
    SE3<> IMU2camWorldfromQuat(Eigen::Quaternion<double> atti){// use the initial attitude from the EKF node to initialise the PTAM pose
        Vector<4> attivec = makeVector(atti.w(), atti.x(), atti.y(), atti.z());//Rw1i
        Matrix<3> iniOrientationEKF;
        iniOrientationEKF = tag::quaternionToMatrix(attivec);

        Matrix<3> roll = TooN::Data(1.0, 0, 0,   // Rww1: roll and pitch are given in a world
                              0, -1, 0,          // frame whose z-axis points downward, so flip
                              0, 0, -1);         // the y and z axes.

        SE3<> camWorld = SE3<>();
        Matrix<3> rotation;
        if (tracker_->attitude_get)
            rotation = iniOrientationEKF; //
        else
            rotation = roll * iniOrientationEKF;//Rwi = Rww1*Rw1i
        camWorld.get_rotation() = rotation*se3IMUfromcam.get_rotation().get_matrix();//Rwc = (Rwi * Ric)

        Vector<3> twr = makeVector(0.0, 0.0, 0.198);// twc = twr + Rwr * trc
        Vector<3> twc = twr + rotation * se3IMUfromcam.get_translation();
        camWorld.get_translation()[0] = 0.0;//twc[0]; //twc
        camWorld.get_translation()[1] = 0.0;//twc[1];
        camWorld.get_translation()[2] = twc[2];

        camWorld = camWorld.inverse();//Tcw

        cout<< "TCW INITIALIZED. TWC: " << twc[0]<< ", " << twc[1]<< ", " << twc[2]<<endl;
//        cout<< camWorld.get_rotation().get_matrix()(2,2)<<endl;
        return camWorld;
    }
Example #4
string serializeForOutput(SE3<double> pose)
{
  stringstream so;
  Vector<3>& trans = pose.get_translation();
  Matrix<3, 3> rot = pose.get_rotation().get_matrix(); // row major
  // output in column-major order - for MATLAB, just use reshape(x, 3, 4)
  for (uint32_t j = 0; j < 3; ++j)
    for (uint32_t i = 0; i < 3; ++i)
      so << rot(i, j) << "\t";
  for (uint32_t i = 0; i < 3; ++i)
      so << trans[i] << "\t";
  return so.str();
}
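A matching reader could rebuild the pose from one such line. The sketch below (assuming TooN; the function name parseFromOutput is hypothetical) reverses the column-major order used by serializeForOutput.

#include <TooN/TooN.h>
#include <TooN/se3.h>
#include <cstdint>
#include <sstream>
#include <string>

using namespace TooN;
using namespace std;

SE3<double> parseFromOutput(const string& line)
{
  stringstream si(line);
  Matrix<3, 3> rot;
  Vector<3> trans;
  // serializeForOutput writes the rotation column by column, then the translation
  for (uint32_t j = 0; j < 3; ++j)
    for (uint32_t i = 0; i < 3; ++i)
      si >> rot(i, j);
  for (uint32_t i = 0; i < 3; ++i)
    si >> trans[i];
  SE3<double> pose;
  pose.get_rotation() = rot;        // TooN's SO3 accepts assignment from a 3x3 matrix
  pose.get_translation() = trans;
  return pose;
}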
Example #5
int main(int argc, char ** argv){
    test();

#if 0   // disabled: differentiates the SE3 action with respect to its six generators
    SE3<> id(makeVector(1,0,0,0,0,0));
    
    const SE3<F<double> > g = make_left_fad_se3(id);
    for(int i = 0; i < 6; ++i)
        cout << get_derivative(g.get_rotation().get_matrix(), i) << get_derivative(g.get_translation(), i) <<  "\n\n";

    Vector<3> in = makeVector(1,2,3);
    const Vector<3, F<double> > p = g * in;
    cout << p << "\n" << get_jacobian<3,6>(p) << endl;
#endif
}
Example #6
    // Load the second camera's pose in the master camera frame from a calibration file
    SE3<> Load_camsec_para()
    {
        SE3<> campara;
        ifstream cam_para_table;
        string table_path = cam_para_path_second;
        double temp1, temp2, temp3, temp4, temp5;
        cam_para_table.open(table_path.c_str());
        assert(cam_para_table.is_open());

//        cam_para_table>>temp1>>temp2>>temp3>>temp4>>temp5;
        Matrix<3> cam;
        for (int i = 0; i < 3; i ++){
            for (int j = 0; j < 3; j ++){
                cam_para_table>>temp1;
                cam(i, j) = temp1;
            }
            cam_para_table>>temp1;
            campara.get_translation()[i] = temp1/1000.0;
        }
        campara.get_rotation() = cam;

        return campara;//Tic
    }
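For reference, here is a sketch of the calibration file layout this loader implies (the numbers below are made up): each of the three rows holds three rotation-matrix entries followed by one translation component in millimetres, i.e. the rows of [R | t]. The commented-out first read suggests some files may begin with an extra five-value header line.

0.9998  -0.0123   0.0156   102.4
0.0121   0.9999   0.0087   -35.7
-0.0157 -0.0085   0.9998    12.9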
Example #7
int main(int argc, char **argv) {
	InitVars(argc, argv);
	if (argc != 1) {
		DLOG << "Usage: "<<argv[0];
		return 0;
	}

	// Load the map and rotate to canonical image
	Map map;
	map.load_images = false;
	map.Load();

	Vector<3> lnR = makeVector(1.1531, 1.26237, -1.24435);  // for lab_kitchen1

	SE3<> scene_to_slam;
	scene_to_slam.get_rotation() = SO3<>::exp(lnR);

	map.scene_to_slam = SO3<>::exp(lnR);

	// TODO: map.Load() should do this automatically
	map.undistorter.Compute(ImageRef(640,480));
	ptam::ATANCamera& cam = map.undistorter.cam;

	vector<int> frame_ids;
	ParseMultiRange(GV3::get<string>("CanonicalImages.InputFrames"), frame_ids);

	COUNTED_FOREACH(int i, int id, frame_ids) {
		if (id < 0 || id >= map.frame_specs.size()) {
			DLOG << "Frame index " << id << " out of range"
					 << " (there are " << map.frame_specs.size() << "frames available)";
		} else {
			const Frame& fs = map.frame_specs[id];
			if (!fs.lost) {


				// Read the image and detect lines
				ImageBundle image(fs.filename);

				PeakLines lines;
	 			lines.Compute(image, fs.pose, map);
				
				SE3<> pose = fs.pose * scene_to_slam;


				// Project vanishing points: the columns of the camera rotation are the
				// scene axis directions expressed in the camera frame, i.e. the three
				// vanishing point directions. Pick the one most aligned with the image
				// y-axis as the vertical axis.
				Vector<3> vpts[3];
				int vert_axis = 0;
				for (int a = 0; a < 3; a++) {
					vpts[a] = pose.get_rotation().get_matrix().T()[a];
					if (abs(vpts[a][1]) > abs(vpts[vert_axis][1])) {
						vert_axis = a;
					}
				}

				int l_axis = (vert_axis+1)%3;
				int r_axis = (vert_axis+2)%3;

				// Compute the rotations
				Matrix<3> R_l_inv, R_r_inv;

				R_l_inv.T()[0] = positive(vpts[l_axis], 0);
				R_l_inv.T()[1] = positive(vpts[vert_axis], 1);
				R_l_inv.T()[2] = vpts[r_axis];

				R_r_inv.T()[0] = positive(vpts[r_axis], 0);
				R_r_inv.T()[1] = positive(vpts[vert_axis], 1);
				R_r_inv.T()[2] = vpts[l_axis];

				Matrix<3> R_l = LU<3>(R_l_inv).get_inverse();
				Matrix<3> R_r = LU<3>(R_r_inv).get_inverse();

				// Warp the images
				ImageRGB<byte> canvas_l(image.sz()), canvas_r(image.sz());
				canvas_l.Clear(PixelRGB<byte>(255,255,255));
				canvas_r.Clear(PixelRGB<byte>(255,255,255));
				for (int y = 0; y < image.ny(); y++) {
					for (int x = 0; x < image.nx(); x++) {

						Vector<3> dest = unproject(cam.UnProject(makeVector(x,y)));

						Vector<3> l_src = R_l_inv * dest;
						Vector<3> r_src = R_r_inv * dest;

						Vector<2> l_im = cam.Project(project(l_src));
						Vector<2> r_im = cam.Project(project(r_src));

						ImageRef l_ir = round_pos(l_im);
						ImageRef r_ir = round_pos(r_im);

						if (image.contains(l_ir)) {
							canvas_l[y][x] = image.rgb[l_ir];
						}
						if (image.contains(r_ir)) {
							canvas_r[y][x] = image.rgb[r_ir];
						}
					}
				}

				ImageRGB<byte> canvas;
				ImageCopy(image.rgb, canvas);

				// Read ratios
				double c_over_f = GV3::get<double>("CanonicalImage.LeftCOverF");
				double f_over_c = 1.0 / c_over_f;

				// Transfer some points
				int horizon_y = image.ny()/2;
				for (int j = 0; j < 50; j++) {
					Vector<2> im_p1 = makeVector(rand()%image.nx(), rand()%image.ny());
					Vector<2> ret_p1 = cam.UnProject(im_p1);

					double y2;
					if (ret_p1[1] > 0) {
						y2 = -c_over_f * ret_p1[1];
					} else {
						y2 = -f_over_c * ret_p1[1];
					}

					Vector<2> ret_p2 = makeVector(ret_p1[0], y2);
					Vector<2> im_p2 = cam.Project(ret_p2);

					Vector<3> orig_ret_p1 = R_l * unproject(ret_p1);
					Vector<3> orig_ret_p2 = R_l * unproject(ret_p2);

					Vector<2> orig_im_p1 = cam.Project(project(orig_ret_p1));
					Vector<2> orig_im_p2 = cam.Project(project(orig_ret_p2));

					DrawLineClipped(canvas, orig_im_p1, orig_im_p2, BrightColors::Get(j));
					DrawLineClipped(canvas_l, im_p1, im_p2, BrightColors::Get(j));
				}

				string basename = "out/canon"+PaddedInt(id, 4);
				WriteImage(basename+"_left.png", canvas_l);
				WriteImage(basename+"_right.png", canvas_r);
				WriteImage(basename+"_orig.png", canvas);
			}
		}
	}
}