Code example #1
Eigen::Matrix4f
parse_registration(turtle_input& input, const std::string& guid_0,
                     const std::string& guid_1) {
    std::pair<std::string, bool> t = parse_transformation_string(input, guid_0, guid_1);
    std::istringstream iss(t.first);
    std::vector<std::string> tokens{std::istream_iterator<std::string>{iss}, std::istream_iterator<std::string>{}};
    assert(tokens.size() == 16);

    Eigen::Matrix4f m;
    for (uint32_t i = 0; i < 4; ++i) {
        for (uint32_t j = 0; j < 4; ++j) {
            // std::stof is simpler than a throwaway stringstream and reports
            // malformed tokens by throwing instead of leaving the value unset
            m(i, j) = std::stof(tokens[i*4+j]);
        }
    }

    if (t.second) {
        // inverse() evaluates into a temporary, so self-assignment is safe
        m = m.inverse();
    }

    return m;
}
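A note on the pattern above: Eigen's inverse() never signals failure, so for matrices parsed from text it can be worth checking invertibility explicitly. A minimal sketch (the helper name is hypothetical, not part of the original code):

#include <Eigen/Dense>

// Sketch: invert a parsed matrix only after an explicit invertibility check.
inline bool try_invert(const Eigen::Matrix4f& m, Eigen::Matrix4f& out)
{
    Eigen::FullPivLU<Eigen::Matrix4f> lu(m);
    if (!lu.isInvertible())
        return false;      // e.g. a corrupt or degenerate transformation string
    out = lu.inverse();    // same result as m.inverse(), reusing the factorization
    return true;
}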
Code example #2
Eigen::Matrix4f GroundTruthOdometry::getTransformation(uint64_t timestamp)
{
    Eigen::Matrix4f pose = Eigen::Matrix4f::Identity();

    if(last_utime != 0)
    {
        // Look up the pose for this timestamp; using operator[] here would
        // silently default-insert an uninitialized entry on a miss
        std::map<uint64_t, Eigen::Isometry3f>::const_iterator it = camera_trajectory.find(timestamp);
        if (it == camera_trajectory.end())
        {
            last_utime = timestamp;
            return pose;
        }

        //Poses are stored in the file in iSAM basis, undo it
        Eigen::Matrix4f M;
        M <<  0,  0, 1, 0,
             -1,  0, 0, 0,
              0, -1, 0, 0,
              0,  0, 0, 1;

        pose = M.inverse() * it->second.matrix() * M;
    }
    else
    {
        // First call: cache the starting pose (keyed by last_utime == 0);
        // guard the lookup instead of dereferencing a possible end() iterator
        std::map<uint64_t, Eigen::Isometry3f>::const_iterator it = camera_trajectory.find(timestamp);
        if (it != camera_trajectory.end())
            camera_trajectory[last_utime] = it->second;
    }

    last_utime = timestamp;

    return pose;
}
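The M⁻¹ · P · M product above is a change of basis, and this particular M is a signed permutation, i.e. orthonormal, so the explicit inversion can be replaced by a transpose. A sketch, valid only while M stays orthonormal:

// Sketch: for orthonormal M, M.inverse() == M.transpose(),
// so the iSAM basis change can skip the general 4x4 inversion.
Eigen::Matrix4f undoIsamBasis(const Eigen::Matrix4f& P, const Eigen::Matrix4f& M)
{
    return M.transpose() * P * M;
}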
Code example #3
File: scene.cpp Project: AIS-Bonn/vismotcoord
void Scene::initScene(CloudPtr &pc_in, CloudPtr &pc_transformed, MatrixXf boundBox, Eigen::Matrix4f eigen_virtualFromFocus_old)
{
    pc_scene.reset (new Cloud);
    pc_scene_focus.reset (new Cloud);
    pc_scene_focus_back.reset(new Cloud);
    for(size_t i=0;i<pc_in->points.size();i++){
        PointT point_in = pc_in->points[i];
        PointT point_transformed = pc_transformed->points[i];
        // cut off: boundBox column 0 holds the upper bound, column 1 the lower bound
        if(point_transformed.x<boundBox(0,0) && point_transformed.x>boundBox(0,1) &&
           point_transformed.y<boundBox(1,0) && point_transformed.y>boundBox(1,1) &&
           point_transformed.z<boundBox(2,0) && point_transformed.z>boundBox(2,1)){
            pc_scene->points.push_back(point_in);
            pc_scene_focus->points.push_back(point_transformed);
        }
    }

    // return back pc_scene to the original cam pos
    Eigen::Matrix4f inv = eigen_virtualFromFocus_old.inverse();
    for(size_t i=0;i<pc_scene_focus->points.size();i++){
        PointT point = pc_scene_focus->points[i];
        PointT point_back = point; // copy first so non-XYZ fields carry over
        point_back.x = inv(0,0)*point.x + inv(0,1)*point.y + inv(0,2)*point.z + inv(0,3);
        point_back.y = inv(1,0)*point.x + inv(1,1)*point.y + inv(1,2)*point.z + inv(1,3);
        point_back.z = inv(2,0)*point.x + inv(2,1)*point.y + inv(2,2)*point.z + inv(2,3);
        pc_scene_focus_back->points.push_back(point_back);
    }


//    if(USENORM) calcNormal();
}
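The hand-rolled loop above applies the rigid transform component by component; PCL ships an equivalent helper that also carries the non-XYZ fields along. A sketch of the same step, assuming the usual pcl/common/transforms.h header:

#include <pcl/common/transforms.h>

// Sketch: one call replaces the manual per-point multiply.
Eigen::Matrix4f inv = eigen_virtualFromFocus_old.inverse();
pcl::transformPointCloud(*pc_scene_focus, *pc_scene_focus_back, inv);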
Code example #4
File: communication.cpp Project: jpmerc/perception3d
void Communication::publish_objectToGrasp_moveitFormat(){
    ros::Rate r(1);
    ros::NodeHandle node;
    static tf::TransformBroadcaster br;

    // Camera-to-world basis change (optical axes -> ROS world axes)
    Eigen::Matrix4f camToWorldMatrix;
    camToWorldMatrix.setZero();
    camToWorldMatrix(0,2) = 1;
    camToWorldMatrix(1,0) = -1;
    camToWorldMatrix(2,1) = -1;
    camToWorldMatrix(3,3) = 1;

    // Construct the listener once: it needs time to fill its tf buffer, and
    // re-creating it every loop iteration would discard that history
    tf::TransformListener listener;

    while(node.ok()){
        pcl::PointCloud<PointT>::Ptr object_pc = m_object_ex_ptr->getObjectToGrasp();
        pcl::PointCloud<PointT>::Ptr object_pc_transformed(new pcl::PointCloud<PointT>);
//        Eigen::Vector4f c = m_object_ex_ptr->getGraspCentroid(); c(3)=1;
//        tf::Transform simpleTF;
        tf::StampedTransform camToJacoTf;
        bool tf_ready = listener.waitForTransform("camera_rgb_frame","root",ros::Time(0),ros::Duration(5.0));
        if(object_pc->size() > 0 && tf_ready){
            listener.lookupTransform("camera_rgb_frame","root",ros::Time(0),camToJacoTf);
            Eigen::Matrix4f camToJacoMatrix;
            pcl_ros::transformAsMatrix(camToJacoTf,camToJacoMatrix);

            Eigen::Matrix4f combinedMatrix = camToJacoMatrix.inverse() * camToWorldMatrix;
//            Eigen::Vector4f result = combinedMatrix * c;
//            Eigen::Matrix4f res;  res(0,3) = result(0); res(1,3) = result(1); res(2,3) = result(2); res(3,3) = 1;
//            simpleTF = tfFromEigen(res);
//            br.sendTransform(tf::StampedTransform(simpleTF, ros::Time::now(), "root", "object_centroid_test"));

            pcl::transformPointCloud(*object_pc, *object_pc_transformed, combinedMatrix);
//            std::cout << "old : " << object_pc->at(100) << std::endl;
//            std::cout << "new : " << object_pc_transformed->at(100) << std::endl;
            ObjectToGrasp_publisher_.publish(object_pc_transformed);
        }
        r.sleep();
    }
}
Code example #5
void IndexMap::predictIndices(const Eigen::Matrix4f & pose,
                              const int & time,
                              const std::pair<GLuint, GLuint> & model,
                              const float depthCutoff,
                              const int timeDelta)
{
    indexFrameBuffer.Bind();

    glPushAttrib(GL_VIEWPORT_BIT);

    glViewport(0, 0, indexRenderBuffer.width, indexRenderBuffer.height);

    glClearColor(0, 0, 0, 0);

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    indexProgram->Bind();

    Eigen::Matrix4f t_inv = pose.inverse();

    Eigen::Vector4f cam(Intrinsics::getInstance().cx() * IndexMap::FACTOR,
                  Intrinsics::getInstance().cy() * IndexMap::FACTOR,
                  Intrinsics::getInstance().fx() * IndexMap::FACTOR,
                  Intrinsics::getInstance().fy() * IndexMap::FACTOR);

    indexProgram->setUniform(Uniform("t_inv", t_inv));
    indexProgram->setUniform(Uniform("cam", cam));
    indexProgram->setUniform(Uniform("maxDepth", depthCutoff));
    indexProgram->setUniform(Uniform("cols", (float)Resolution::getInstance().cols() * IndexMap::FACTOR));
    indexProgram->setUniform(Uniform("rows", (float)Resolution::getInstance().rows() * IndexMap::FACTOR));
    indexProgram->setUniform(Uniform("time", time));
    indexProgram->setUniform(Uniform("timeDelta", timeDelta));

    glBindBuffer(GL_ARRAY_BUFFER, model.first);

    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, 0);

    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f)));

    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 2));

    glDrawTransformFeedback(GL_POINTS, model.second);

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    indexFrameBuffer.Unbind();

    indexProgram->Unbind();

    glPopAttrib();

    glFinish();
}
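For orientation: pose here is the camera-to-world transform, so t_inv is what takes a global model point into the current camera frame before the shader projects it with the (cx, cy, fx, fy) layout of the cam uniform. Schematically (p_world is a hypothetical model point):

// Sketch of the frame bookkeeping behind t_inv:
Eigen::Matrix4f t_inv = pose.inverse();       // world -> camera
Eigen::Vector4f p_world(0.f, 0.f, 1.f, 1.f);  // hypothetical model point
Eigen::Vector4f p_cam = t_inv * p_world;      // same point in the camera frame
// pinhole projection, matching the uniform layout above:
float u = cam(2) * p_cam.x() / p_cam.z() + cam(0);
float v = cam(3) * p_cam.y() / p_cam.z() + cam(1);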
Code example #6
File: KFsphere_SLAM.cpp Project: EduFdez/rgbd360
    bool isOdometryContinuousMotion(Eigen::Matrix4f &prevPose, Eigen::Matrix4f &currPose, float thresDist = 0.1)
    {
        Eigen::Matrix4f relativePose = prevPose.inverse() * currPose;
        if( relativePose.block(0,3,3,1).norm() > thresDist )
            return false;

        return true;
    }
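The check above thresholds only the translation of the relative pose; if rotation jumps also need to be caught, a companion test can be sketched the same way (thresAng, in radians, is a hypothetical parameter):

#include <Eigen/Geometry>

// Sketch: bound the rotation angle of the relative pose as well.
bool isOdometryContinuousRotation(Eigen::Matrix4f &prevPose, Eigen::Matrix4f &currPose, float thresAng = 0.2f)
{
    Eigen::Matrix4f relativePose = prevPose.inverse() * currPose;
    Eigen::AngleAxisf aa(Eigen::Matrix3f(relativePose.block<3,3>(0,0)));
    return aa.angle() <= thresAng; // angle() is already in [0, pi]
}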
Code example #7
File: scene.cpp Project: patrickriordan/mass-spring
void Scene::GetCameraRay(double x, double y, Eigen::Vector3d* origin, Eigen::Vector3d* ray) {
  Eigen::Matrix4f inverse;
  inverse = g_viewMatrix.inverse();
  Eigen::Vector4f preVec;
  // NDC coordinates: x and y in [-1,1], z fixed mid-way into the depth range
  preVec << (2 * x / DDWIDTH) - 1, 1 - (2 * y / DDHEIGHT), 2 * .5 - 1, 1;
  (*origin)[0] = xpos;
  (*origin)[1] = ypos;
  (*origin)[2] = zpos;
  Eigen::Vector4f ori = inverse * preVec;
  ori /= ori[3]; // homogeneous divide; a no-op when g_viewMatrix is rigid
  (*ray)[0] = ori[0] - (*origin)[0];
  (*ray)[1] = ori[1] - (*origin)[1];
  (*ray)[2] = ori[2] - (*origin)[2];
}
Code example #8
File: DynamicsRobot.cpp Project: TheMarex/simox
void DynamicsRobot::setGlobalPose(Eigen::Matrix4f &gp)
{
    Eigen::Matrix4f currentPose = robot->getGlobalPose();
    // World-frame correction: chosen so that delta * currentPose == gp
    Eigen::Matrix4f delta = gp * currentPose.inverse();

    robot->setGlobalPose(gp);
    std::map<VirtualRobot::RobotNodePtr, DynamicsObjectPtr>::iterator it = dynamicRobotNodes.begin();
    while (it != dynamicRobotNodes.end())
    {
        // Pre-multiply: delta lives in the world frame, so it acts on the left
        Eigen::Matrix4f newPose = delta * it->second->getSceneObject()->getGlobalPose();
        it->second->setPose(newPose);
        it++;
    }
}
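The delta here is a world-frame correction, defined so that delta * currentPose == gp; that is also why it has to act on the left of each attached object's global pose. A quick sanity check, assuming only Eigen:

#include <cassert>

// Sketch: verify the world-frame delta convention.
Eigen::Matrix4f delta = gp * currentPose.inverse();
assert((delta * currentPose).isApprox(gp, 1e-4f));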
Code example #9
File: DepthFrame.cpp Project: aclapes/ReMedi2
void DepthFrame::getRegisteredAndReferencedPointCloud(pcl::PointCloud<pcl::PointXYZ>& cloud)
{
    pcl::PointXYZ regReferencePoint = getRegisteredReferencePoint();
    
    // Build a translation matrix for the registered reference point; its inverse
    // re-expresses the registered cloud relative to that point
    Eigen::Matrix4f E = Eigen::Matrix4f::Identity();
    E.col(3) = regReferencePoint.getVector4fMap(); // set translation column
    
    // Apply registration first, then the referencing translation (right-to-left order in the matrix product)
    pcl::PointCloud<pcl::PointXYZ>::Ptr pCloudTmp (new pcl::PointCloud<pcl::PointXYZ>);
    getPointCloud(*pCloudTmp);
    
    pcl::transformPointCloud(*pCloudTmp, cloud, E.inverse() * m_T);
}
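Since E is a pure translation, the general inverse() can be avoided: negating the translation column produces the same matrix. A sketch:

// Sketch: the inverse of a pure translation just negates the offset.
Eigen::Matrix4f E_inv = Eigen::Matrix4f::Identity();
E_inv.col(3).head<3>() = -E.col(3).head<3>();
// E_inv matches E.inverse() here up to float rounding.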
Code example #10
void SQ_fitter<PointT>::getBoundingBox(const PointCloudPtr &_cloud,
				       double _dim[3],
				       double _trans[3],
				       double _rot[3] ) {

  // 1. Compute the bounding box center
  Eigen::Vector4d centroid;
  pcl::compute3DCentroid( *_cloud, centroid );
  _trans[0] = centroid(0);
  _trans[1] = centroid(1); 
  _trans[2] = centroid(2);

  // 2. Compute main axis orientations
  pcl::PCA<PointT> pca;
  pca.setInputCloud( _cloud );
  Eigen::Vector3f eigVal = pca.getEigenValues();
  Eigen::Matrix3f eigVec = pca.getEigenVectors();
  // Re-order the axes so the principal direction becomes Z, then rebuild X
  // as Y x Z so the columns form a right-handed orthonormal frame
  eigVec.col(2) = eigVec.col(0); // principal axis -> Z
  eigVec.col(0) = eigVec.col(1).cross( eigVec.col(2) );
  Eigen::Vector3f rpy = eigVec.eulerAngles(2,1,0);
 
  _rot[0] = (double)rpy(2);
  _rot[1] = (double)rpy(1);
  _rot[2] = (double)rpy(0);

  // Transform _cloud
  Eigen::Matrix4f transf = Eigen::Matrix4f::Identity();
  transf.block(0,3,3,1) << (float)centroid(0), (float)centroid(1), (float)centroid(2);
  transf.block(0,0,3,3) = eigVec;

  Eigen::Matrix4f tinv; tinv = transf.inverse();
  PointCloudPtr cloud_temp( new pcl::PointCloud<PointT>() );
  pcl::transformPointCloud( *_cloud, *cloud_temp, tinv );

  // Get maximum and minimum
  PointT minPt; PointT maxPt;
  pcl::getMinMax3D( *cloud_temp, minPt, maxPt );
  
  _dim[0] = ( maxPt.x - minPt.x ) / 2.0;
  _dim[1] = ( maxPt.y - minPt.y ) / 2.0;
  _dim[2] = ( maxPt.z - minPt.z ) / 2.0;

}
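eulerAngles assumes its input is a proper rotation; after the column shuffle above the frame is right-handed by construction (X = Y × Z), but an explicit assertion makes the assumption visible. A sketch, not in the original:

#include <cassert>
#include <cmath>

// Sketch: verify eigVec is a proper rotation before extracting Euler angles.
assert(eigVec.isUnitary(1e-4f));                        // columns orthonormal
assert(std::fabs(eigVec.determinant() - 1.0f) < 1e-4f); // right-handed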
Code example #11
File: Camera.cpp Project: elliotfiske/slumber
void Camera::applyViewMatrix(MatrixStack *MV) const
{
	// Create the translation and rotation matrices
	Eigen::Matrix4f T = Eigen::Matrix4f::Identity();
	T(0,3) = position(0);
	T(1,3) = position(1);
	T(2,3) = position(2);
	Eigen::Matrix4f YR = Eigen::Matrix4f::Identity();
	YR.block<3,3>(0,0) = Eigen::AngleAxisf(yawRotation, Eigen::Vector3f(0.0f, 1.0f, 0.0f)).toRotationMatrix();
	Eigen::Matrix4f PR = Eigen::Matrix4f::Identity();
	PR.block<3,3>(0,0) = Eigen::AngleAxisf(pitchRotation, Eigen::Vector3f(1.0f, 0.0f, 0.0f)).toRotationMatrix();

	// The camera matrix C composes pitch, then yaw, then translation (right-to-left)
	Eigen::Matrix4f C = T * YR * PR;
	// The view matrix is the inverse
	Eigen::Matrix4f V = C.inverse();
	// Add to the matrix stack
	MV->multMatrix(V);
}
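Because C is rigid (rotations plus a translation), the view matrix can also be obtained without a general 4x4 inversion: the inverse of [R | t] is [Rᵀ | -Rᵀt], and Eigen exploits that when given the Isometry hint. A sketch:

#include <Eigen/Geometry>

// Sketch: invert a rigid camera matrix via the Isometry fast path.
Eigen::Affine3f C_affine(C);                                   // wrap the 4x4
Eigen::Matrix4f V = C_affine.inverse(Eigen::Isometry).matrix();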
Code example #12
File: scene.cpp Project: AIS-Bonn/vismotcoord
void Scene::focusSceneUpdate(CloudPtr &pc_in, tf::Transform &transform_camFromBase, tf::Transform &transform_baseFromfocus, MatrixXf boundBox, Eigen::Matrix4f eigen_virtualFromFocus_old)
{
    pc_scene.reset (new Cloud);
    pc_scene_focus.reset (new Cloud);
    pc_scene_base.reset(new Cloud);
    pc_scene_focus_back.reset(new Cloud);

    CloudPtr pc_scene_focus_temp;
    pc_scene_focus_temp.reset (new Cloud);

    Eigen::Matrix4f eigen_camFromBase;
    transformAsMatrix (transform_camFromBase, eigen_camFromBase);
    if(DEBUG_ALGORITHM) cout<<"-------------camupdated"<<endl;
    if(DEBUG_ALGORITHM) cout<<eigen_camFromBase<<endl;

    pcl_ros::transformPointCloud(*pc_in, *pc_scene_base, transform_camFromBase);
    pcl_ros::transformPointCloud(*pc_scene_base, *pc_scene_focus_temp, transform_baseFromfocus);

    for(size_t i=0;i<pc_in->points.size();i++){
        PointT point_in = pc_in->points[i];
        PointT point_transformed = pc_scene_focus_temp->points[i];
        // cut off: boundBox column 0 holds the upper bound, column 1 the lower bound
        if(point_transformed.x<boundBox(0,0) && point_transformed.x>boundBox(0,1) &&
           point_transformed.y<boundBox(1,0) && point_transformed.y>boundBox(1,1) &&
           point_transformed.z<boundBox(2,0) && point_transformed.z>boundBox(2,1)){
            pc_scene->points.push_back(point_in);
            pc_scene_focus->points.push_back(point_transformed);
        }
    }

    // return back pc_scene to the original cam pos
    Eigen::Matrix4f inv = eigen_virtualFromFocus_old.inverse();
    for(size_t i=0;i<pc_scene_focus->points.size();i++){
        PointT point = pc_scene_focus->points[i];
        PointT point_back = point; // copy first so non-XYZ fields carry over
        point_back.x = inv(0,0)*point.x + inv(0,1)*point.y + inv(0,2)*point.z + inv(0,3);
        point_back.y = inv(1,0)*point.x + inv(1,1)*point.y + inv(1,2)*point.z + inv(1,3);
        point_back.z = inv(2,0)*point.x + inv(2,1)*point.y + inv(2,2)*point.z + inv(2,3);
        pc_scene_focus_back->points.push_back(point_back);
    }

//    if(USENORM) calcNormal();
}
Code example #13
pcl::PointCloud<pcl::PointXYZRGB>::Ptr 
point_get_reg_cloud (pcl::PointCloud<pcl::PointXYZRGB>::Ptr source_cloud, pcl::PointCloud<pcl::PointXYZRGB>::Ptr target_cloud)
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr reduced_source, reduced_target;
  reduced_source = point_cloud_xyzrgb_to_xyz(source_cloud); // creates a new cloud, so the reduced clouds can be handled independently of the originals
  reduced_target = point_cloud_xyzrgb_to_xyz(target_cloud);
  reduced_source = point_prepare_icp(reduced_source);
  reduced_target = point_prepare_icp(reduced_target);
  //Prepare clouds to be icp-ed
  Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
  Eigen::Matrix4f target_to_source_transformation;
  //Transformation Matrix
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  //ICP Filter
  pcl::PointCloud<pcl::PointXYZ>::Ptr icp_result (new pcl::PointCloud<pcl::PointXYZ>);
  std::vector<int> score; // signed: the improvement test below subtracts scores
  for(int i = 0; i < reg_iteration ; i ++)
    {
      icp.setInputCloud(reduced_source);
      icp.setInputTarget(reduced_target);
      //set ICP
      icp.align(*reduced_source);
      transform *= icp.getFinalTransformation();
      int current_score = (int) icp.getFitnessScore();
      // signed arithmetic: with unsigned scores the difference would wrap
      // around whenever the fit got worse and the loop would never stop
      if(i>1 && score.back()-current_score < 50)
	{
        std::cout<<"ran"<<i<<"times, with score : "<<icp.getFitnessScore() << std::endl;
	break;
	}
      else
	score.push_back(current_score);
    }
  target_to_source_transformation = transform.inverse();
  //"We got transformation matrix, now transforming target into source's frame"
  pcl::transformPointCloud(*target_cloud, *target_cloud, target_to_source_transformation);
  //"Transformation done. Returning transformed cloud"
  *source_cloud += *target_cloud;
  //Merge them
  return source_cloud;
}
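On the final inversion: ICP's accumulated transform maps the source into the target's frame, so its inverse is exactly what moves the target into the source's frame before the clouds are merged. Schematically:

// Sketch of the frame bookkeeping behind transform.inverse():
//   transform   : source frame -> target frame (accumulated over ICP runs)
//   transform^-1: target frame -> source frame
Eigen::Matrix4f source_to_target = transform;
Eigen::Matrix4f target_to_source = source_to_target.inverse();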
Code example #14
Eigen::Matrix4f computeGroundTruth(const std::string path_groundtruth, const int id)
{
	std::string line;
	std::ifstream file(path_groundtruth);
	int offset = 3;
	std::vector<std::string> splitVec;
	int index = id * 5 + offset;
	Eigen::Matrix4f transformationMatrix = Eigen::Matrix4f::Identity(); // defined fallback if the file is missing

	if (file.is_open())
	{
		int count = 1;
		bool done = false;

		while (count<index)
		{
			std::getline(file, line);
			count++;
		}

		//start reading matrix from file
		for (int i = 0; i<4; i++)
		{
			std::getline(file, line);
			boost::split(splitVec, line, boost::is_any_of("\t"), boost::token_compress_on);
			transformationMatrix.row(i) << boost::lexical_cast<float>(splitVec.at(0)),
				boost::lexical_cast<float>(splitVec.at(1)),
				boost::lexical_cast<float>(splitVec.at(2)),
				boost::lexical_cast<float>(splitVec.at(3));
		}
	}
	else
		std::cout << "GroundTruth file not found" << std::endl;

	// inverse() returns by value and leaves its operand untouched,
	// so the result must be assigned back
	transformationMatrix = transformationMatrix.inverse();

	return transformationMatrix;
}
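Worth spelling out: Eigen's inverse() is a pure function that returns the result by value and never modifies its operand, so calling it without assigning the result compiles but does nothing. A minimal illustration:

Eigen::Matrix4f A = Eigen::Matrix4f::Identity();
A(0, 3) = 2.0f;   // translation of +2 along x
A.inverse();      // no effect: the returned inverse is discarded
A = A.inverse();  // correct; inverse() evaluates into a temporary,
                  // so assigning back to A is safe in Eigen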
Code example #15
File: offline_integration.cpp Project: FBIKKIBF/pcl
void
pcl::ihs::OfflineIntegration::computationThread ()
{
  std::vector <std::string> filenames;

  if (!this->getFilesFromDirectory (path_dir_, ".pcd", filenames))
  {
    std::cerr << "ERROR in offline_integration.cpp: Could not get the files from the directory\n";
    return;
  }

  // First cloud is reference model
  std::cerr << "Processing file " << std::setw (5) << 1 << " / " << filenames.size () << std::endl;
  CloudXYZRGBNormalPtr cloud_model (new CloudXYZRGBNormal ());
  Eigen::Matrix4f T = Eigen::Matrix4f::Identity ();
  if (!this->load (filenames [0], cloud_model, T))
  {
    std::cerr << "ERROR in offline_integration.cpp: Could not load the model cloud.\n";
    return;
  }

  pcl::transformPointCloudWithNormals (*cloud_model, *cloud_model, T);

  if (!integration_->reconstructMesh (cloud_model, mesh_model_))
  {
    std::cerr << "ERROR in offline_integration.cpp: Could not reconstruct the model mesh.\n";
    return;
  }

  Base::setPivot ("model");
  Base::addMesh (mesh_model_, "model");

  if (filenames.size () < 2)
  {
    // Only the model cloud exists; nothing to merge.
    return;
  }

  for (unsigned int i=1; i<filenames.size (); ++i)
  {
    std::cerr << "Processing file " << std::setw (5) << i+1 << " / " << filenames.size () << std::endl;

    {
      boost::mutex::scoped_lock lock (mutex_);
      if (destructor_called_) return;
    }
    boost::mutex::scoped_lock lock_quit (mutex_quit_);

    CloudXYZRGBNormalPtr cloud_data (new CloudXYZRGBNormal ());
    if (!this->load (filenames [i], cloud_data, T))
    {
      std::cerr << "ERROR in offline_integration.cpp: Could not load the data cloud.\n";
      return;
    }

    if (!integration_->merge (cloud_data, mesh_model_, T))
    {
      std::cerr << "ERROR in offline_integration.cpp: merge failed.\n";
      return;
    }

    integration_->age (mesh_model_);

    {
      lock_quit.unlock ();
      boost::mutex::scoped_lock lock (mutex_);
      if (destructor_called_) return;

      Base::addMesh (mesh_model_, "model", Eigen::Isometry3d (T.inverse ().cast <double> ()));
      Base::calcFPS (computation_fps_);
    }
  }
  Base::setPivot ("model");
}
Code example #16
void
RegisteredViewsSource<Full3DPointT, PointInT, OutModelPointT>::loadModel (ModelT & model)
{
    const std::string training_view_path = path_ + model.class_ + "/" + model.id_ + "/views/";
    const std::string view_pattern = ".*" + view_prefix_ + ".*.pcd";
    model.view_filenames_ = io::getFilesInDirectory(training_view_path, view_pattern, false);
    std::cout << "Object class: " << model.class_ << ", id: " << model.id_ << ", views: " << model.view_filenames_.size() << std::endl;

    typename pcl::PointCloud<Full3DPointT>::Ptr modell (new pcl::PointCloud<Full3DPointT>);
    typename pcl::PointCloud<Full3DPointT>::Ptr modell_voxelized (new pcl::PointCloud<Full3DPointT>);
    pcl::io::loadPCDFile(path_ + model.class_ + "/" + model.id_ + "/3D_model.pcd", *modell);

    float voxel_grid_size = 0.003f;
    typename pcl::VoxelGrid<Full3DPointT> grid_;
    grid_.setInputCloud (modell);
    grid_.setLeafSize (voxel_grid_size, voxel_grid_size, voxel_grid_size);
    grid_.setDownsampleAllData (true);
    grid_.filter (*modell_voxelized);

    model.normals_assembled_.reset(new pcl::PointCloud<pcl::Normal>);
    model.assembled_.reset (new pcl::PointCloud<PointInT>);

    pcl::copyPointCloud(*modell_voxelized, *model.assembled_);
    pcl::copyPointCloud(*modell_voxelized, *model.normals_assembled_);

    if(load_into_memory_)
    {
        model.views_.resize( model.view_filenames_.size() );
        model.indices_.resize( model.view_filenames_.size() );
        model.poses_.resize( model.view_filenames_.size() );
        model.self_occlusions_.resize( model.view_filenames_.size() );

        for (size_t i=0; i<model.view_filenames_.size(); i++)
        {
            // load training view
            const std::string view_file = training_view_path + "/" + model.view_filenames_[i];
            model.views_[i].reset( new pcl::PointCloud<PointInT> () );
            pcl::io::loadPCDFile (view_file, *model.views_[i]);

            // read pose
            std::string pose_fn (view_file);
            boost::replace_last (pose_fn, view_prefix_, pose_prefix_);
            boost::replace_last (pose_fn, ".pcd", ".txt");
            Eigen::Matrix4f pose = io::readMatrixFromFile( pose_fn );
            model.poses_[i] = pose.inverse(); //the recognizer assumes transformation from M to CC - i think!

            // read object mask
            model.indices_[i].indices.clear();
            std::string obj_indices_fn (view_file);
            boost::replace_last (obj_indices_fn, view_prefix_, indices_prefix_);
            boost::replace_last (obj_indices_fn, ".pcd", ".txt");
            std::ifstream f ( obj_indices_fn.c_str() );
            int idx;
            while (f >> idx)
                model.indices_[i].indices.push_back(idx);
            f.close();

            model.self_occlusions_[i] = -1.f;
        }
    }
    else
    {
Code example #17
/** Convert transform in global (ladybug) coordinates to local (camera) coordinates.
 * @param Eigen::Matrix4f transform in global coordinates
 * @param int index of the previous camera
 * @param int index of the current camera
 * @return Eigen::Matrix4f transform in local coordinates */
Eigen::Matrix4f Ladybug2::Ladybug2CamRef(Eigen::Matrix4f TGlobal, int camNoPrev, int camNoCurr)
{
    Eigen::Matrix4f TLocalInv = extrinsics_[camNoPrev].inverse() * TGlobal * extrinsics_[camNoCurr];
    Eigen::Matrix4f TLocal = TLocalInv.inverse();
    return TLocal;
}
Code example #18
/** Convert transform in local (camera) coordinates to global (ladybug) coordinates.
 * @param Eigen::Matrix4f transform in local coordinates
 * @param int index of the previous camera
 * @param int index of the current camera
 * @return Eigen::Matrix4f transform in global coordinates */
Eigen::Matrix4f Ladybug2::cam2LadybugRef(Eigen::Matrix4f TLocal, int camNoPrev, int camNoCurr)
{
    Eigen::Matrix4f TGlobal = extrinsics_[camNoPrev] * TLocal.inverse() * extrinsics_[camNoCurr].inverse();
    return TGlobal;
}
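These two conversions are mutual inverses: substituting one into the other collapses the extrinsics and returns the original transform. A round-trip sketch (ladybug, TGlobal, prev and curr are hypothetical names):

#include <cassert>

// Sketch: cam2LadybugRef should undo Ladybug2CamRef for any pose.
Eigen::Matrix4f TLocal   = ladybug.Ladybug2CamRef(TGlobal, prev, curr);
Eigen::Matrix4f TGlobal2 = ladybug.cam2LadybugRef(TLocal, prev, curr);
assert(TGlobal2.isApprox(TGlobal, 1e-4f));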
Code example #19
File: ar_kinect.cpp Project: MorS25/dasl-ros-pkg
  /* 
   * One and only one callback, now takes cloud, does everything else needed. 
   */
  void ARPublisher::getTransformationCallback (const sensor_msgs::PointCloud2ConstPtr & msg)
  {
    sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image);
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* do we need to initialize? */
    if(!configured_)
    {
      if(msg->width == 0 || msg->height == 0)
      {
        ROS_ERROR ("Deformed cloud! Size = %d, %d.", msg->width, msg->height);
        return;
      }

      cam_param_.xsize = msg->width;
      cam_param_.ysize = msg->height;

      cam_param_.dist_factor[0] = msg->width/2;         // x0 = cX from openCV calibration
      cam_param_.dist_factor[1] = msg->height/2;        // y0 = cY from openCV calibration
      cam_param_.dist_factor[2] = 0;                    // f = -100*k1 from CV. Note, we had to do mm^2 to m^2, hence 10^8->10^2
      cam_param_.dist_factor[3] = 1.0;                  // scale factor, should probably be >1, but who cares...
      
      arInit ();
    }

    /* convert cloud to PCL */
    PointCloud cloud;
    pcl::fromROSMsg(*msg, cloud);
 
    /* get an OpenCV image from the cloud */
    pcl::toROSMsg (cloud, *image_msg);

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
      return; // cv_ptr is invalid past this point
    }
    dataPtr = (ARUint8 *) cv_ptr->image.ptr();

    /* detect the markers in the video frame */
    if (arDetectMarkerLite (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      return;
    }
 
    arPoseMarkers_.markers.clear ();
    /* check for known patterns */
    for (i = 0; i < objectnum; i++)
    {
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else                  // make sure you have the best pattern (highest confidence factor)
          if (marker_info[k].cf < marker_info[j].cf)
            k = j;
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }
      
      /* create a cloud for marker corners */
      int d = marker_info[k].dir;
      PointCloud marker;
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(4-d)%4][0], (int)marker_info[k].vertex[(4-d)%4][1] ) ); // upper left
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(5-d)%4][0], (int)marker_info[k].vertex[(5-d)%4][1] ) ); // upper right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(6-d)%4][0], (int)marker_info[k].vertex[(6-d)%4][1] ) ); // lower right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(7-d)%4][0], (int)marker_info[k].vertex[(7-d)%4][1] ) ); // lower left

      /* create an ideal cloud */
      double w = object[i].marker_width;
      PointCloud ideal;
      ideal.push_back( makeRGBPoint(-w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,-w/2,0) );
      ideal.push_back( makeRGBPoint(-w/2,-w/2,0) );

      /* get transformation */
      Eigen::Matrix4f t;
      TransformationEstimationSVD obj;
      obj.estimateRigidTransformation( marker, ideal, t );

      
      /* get final transformation */
      tf::Transform transform = tfFromEigen(t.inverse());
   
      // any(transform == nan)
      tf::Matrix3x3  m = transform.getBasis();
      tf::Vector3    p = transform.getOrigin();
      bool invalid = false;
      for(int i=0; i < 3; i++)
        for(int j=0; j < 3; j++)
          invalid = (invalid || isnan(m[i][j]) || fabs(m[i][j]) > 1.0);

      for(int i=0; i < 3; i++)
          invalid = (invalid || isnan(p[i]));
       

      if(invalid)
        continue; 

      /* publish the marker */
      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = msg->header.frame_id;
      ar_pose_marker.header.stamp = msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = transform.getOrigin().getX();
      ar_pose_marker.pose.pose.position.y = transform.getOrigin().getY();
      ar_pose_marker.pose.pose.position.z = transform.getOrigin().getZ();

      // Use the quaternion components directly; getAxis() yields the rotation
      // axis, which is not the same as the quaternion's x/y/z
      ar_pose_marker.pose.pose.orientation.x = transform.getRotation().getX();
      ar_pose_marker.pose.pose.orientation.y = transform.getRotation().getY();
      ar_pose_marker.pose.pose.orientation.z = transform.getRotation().getZ();
      ar_pose_marker.pose.pose.orientation.w = transform.getRotation().getW();

      ar_pose_marker.confidence = marker_info->cf;
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      /* publish transform */
      if (publishTf_)
      {
	    broadcaster_.sendTransform(tf::StampedTransform(transform, msg->header.stamp, msg->header.frame_id, object[i].name));
      }

      /* publish visual marker */

      if (publishVisualMarkers_)
      {
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = transform * m; // marker pose in the camera frame

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = msg->header.frame_id;
        rvizMarker_.header.stamp = msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration ();

        rvizMarkerPub_.publish (rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish (arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
Code example #20
ZMPDistributor::ForceTorque ZMPDistributor::distributeZMP(const Eigen::Vector3f& localAnkleLeft,
                                                              const Eigen::Vector3f& localAnkleRight,
                                                              const Eigen::Matrix4f& leftFootPoseGroundFrame,
                                                              const Eigen::Matrix4f& rightFootPoseGroundFrame,
                                                              const Eigen::Vector3f& zmpRefGroundFrame,
                                                              Bipedal::SupportPhase phase)
{
    Eigen::Matrix4f groundPoseLeft  = Bipedal::projectPoseToGround(leftFootPoseGroundFrame);
    Eigen::Matrix4f groundPoseRight = Bipedal::projectPoseToGround(rightFootPoseGroundFrame);
    Eigen::Vector3f localZMPLeft    = VirtualRobot::MathTools::transformPosition(zmpRefGroundFrame, groundPoseLeft.inverse());
    Eigen::Vector3f localZMPRight   = VirtualRobot::MathTools::transformPosition(zmpRefGroundFrame, groundPoseRight.inverse());

    double alpha = computeAlpha(groundPoseLeft, groundPoseRight, zmpRefGroundFrame, localZMPLeft.head(2), localZMPRight.head(2), phase);

    //std::cout << "########## " << alpha << " ###########" << std::endl;

    ForceTorque ft;
    // kg*m/s^2 = N
    ft.leftForce  = -(1-alpha) * mass * gravity;
    ft.rightForce = -alpha     * mass * gravity;

    // Note we need force as kg*mm/s^2
    ft.leftTorque  = (localAnkleLeft  - localZMPLeft).cross(ft.leftForce * 1000);
    ft.rightTorque = (localAnkleRight - localZMPRight).cross(ft.rightForce * 1000);

    // ZMP not contained in the support polygon
    if (std::fabs(alpha) > std::numeric_limits<float>::epsilon()
     && std::fabs(alpha-1) > std::numeric_limits<float>::epsilon())
    {
        Eigen::Vector3f leftTorqueGroundFrame  = groundPoseLeft.block(0, 0, 3, 3)  * ft.leftTorque;
        Eigen::Vector3f rightTorqueGroundFrame = groundPoseRight.block(0, 0, 3, 3) * ft.rightTorque;
        Eigen::Vector3f tau0 = -1 * (leftTorqueGroundFrame + rightTorqueGroundFrame);

        //std::cout << "Tau0World: " << tau0.transpose() << std::endl;
        //std::cout << "leftTorqueWorld: "  << leftTorqueWorld.transpose() << std::endl;
        //std::cout << "rightTorqueWorld: " << rightTorqueWorld.transpose() << std::endl;

        // Note: Our coordinate system is different than in the paper!
        // Also this is not the same as the ground frame.
        Eigen::Vector3f xAxis = leftFootPoseGroundFrame.block(0,3,3,1) + localAnkleLeft
                              - localAnkleRight - rightFootPoseGroundFrame.block(0,3,3,1);
        xAxis /= xAxis.norm();
        Eigen::Vector3f zAxis(0, 0, 1);
        Eigen::Vector3f yAxis = zAxis.cross(xAxis);
        yAxis /= yAxis.norm();
        Eigen::Matrix3f centerFrame;
        centerFrame.block(0, 0, 3, 1) = xAxis;
        centerFrame.block(0, 1, 3, 1) = yAxis;
        centerFrame.block(0, 2, 3, 1) = zAxis;

        //std::cout << "Center frame:\n" << centerFrame << std::endl;

        Eigen::Vector3f centerTau0 = centerFrame.transpose() * tau0;
        Eigen::Vector3f leftTorqueCenter;
        leftTorqueCenter.x() = (1-alpha)*centerTau0.x();
        leftTorqueCenter.y() = centerTau0.y() < 0 ? centerTau0.y() : 0;
        leftTorqueCenter.z() = 0;
        Eigen::Vector3f rightTorqueCenter;
        rightTorqueCenter.x() = alpha*centerTau0.x();
        rightTorqueCenter.y() = centerTau0.y() < 0 ? 0 : centerTau0.y();
        rightTorqueCenter.z() = 0;

        //std::cout << "Tau0Center: " << centerTau0.transpose() << std::endl;
        //std::cout << "leftTorqueCenter: "  << leftTorqueCenter.transpose() << std::endl;
        //std::cout << "rightTorqueCenter: " << rightTorqueCenter.transpose() << std::endl;

        // tcp <--- ground frame <--- center frame
        ft.leftTorque  = groundPoseLeft.block(0, 0, 3, 3).transpose()  * centerFrame * leftTorqueCenter;
        ft.rightTorque = groundPoseRight.block(0, 0, 3, 3).transpose() * centerFrame * rightTorqueCenter;
    }

    // Torque depends on the timestep; we need a way to handle this correctly.
    const double torqueFactor = 1;
    // convert to Nm
    ft.leftTorque  *= torqueFactor / 1000.0 / 1000.0;
    ft.rightTorque *= torqueFactor / 1000.0 / 1000.0;

    //std::cout << "leftTorque: "  << ft.leftTorque.transpose() << std::endl;
    //std::cout << "rightTorque: " << ft.rightTorque.transpose() << std::endl;

    return ft;
}
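A remark on the torque frame changes above: blocks like groundPoseLeft.block(0, 0, 3, 3).transpose() stand in for the inverse rotation, which is valid only while those blocks are orthonormal. A sketch of the assumption:

#include <cassert>

// Sketch: R.transpose() substitutes for R.inverse() only for a true rotation.
Eigen::Matrix3f R = groundPoseLeft.block<3,3>(0, 0);
assert(R.isUnitary(1e-4f));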
Code example #21
int volumetric_knt_cuda(int argc, char **argv)
{
	Timer timer;
	int vol_size = vx_count * vx_size;
	float half_vol_size = vol_size * 0.5f;

	Eigen::Vector3i voxel_size(vx_size, vx_size, vx_size);
	Eigen::Vector3i volume_size(vol_size, vol_size, vol_size);
	Eigen::Vector3i voxel_count(vx_count, vx_count, vx_count);
	int total_voxels = voxel_count.x() * voxel_count.y() * voxel_count.z();


	std::cout << std::fixed
		<< "Voxel Count  : " << voxel_count.transpose() << std::endl
		<< "Voxel Size   : " << voxel_size.transpose() << std::endl
		<< "Volume Size  : " << volume_size.transpose() << std::endl
		<< "Total Voxels : " << total_voxels << std::endl
		<< std::endl;

	timer.start();
	KinectFrame knt(filepath);
	timer.print_interval("Importing knt frame : ");

	Eigen::Affine3f grid_affine = Eigen::Affine3f::Identity();
	grid_affine.translate(Eigen::Vector3f(0, 0, half_vol_size));
	grid_affine.scale(Eigen::Vector3f(1, 1, -1));	// z is negative going into the screen
	Eigen::Matrix4f grid_matrix = grid_affine.matrix();

	float knt_near_plane = 0.1f;
	float knt_far_plane = 10240.0f;
	Eigen::Matrix4f projection = perspective_matrix<float>(KINECT_V2_FOVY, KINECT_V2_DEPTH_ASPECT_RATIO, knt_near_plane, knt_far_plane);
	Eigen::Matrix4f projection_inverse = projection.inverse();
	Eigen::Matrix4f view_matrix = Eigen::Matrix4f::Identity();

	std::vector<float4> vertices(knt.depth.size(), make_float4(0, 0, 0, 1));
	std::vector<float4> normals(knt.depth.size(), make_float4(0, 0, 1, 1));
	std::vector<Eigen::Vector2f> grid_voxels_params(total_voxels);

	// 
	// setup image parameters
	//
	unsigned short image_width = 2;
	unsigned short image_height = 2;
	uchar4* image_data = new uchar4[image_width * image_height];
	memset(image_data, 0, image_width * image_height * sizeof(uchar4));
	float4* debug_buffer = new float4[image_width * image_height];
	memset(debug_buffer, 0, image_width * image_height * sizeof(float4));

	knt_cuda_setup(
		vx_count, vx_size, 
		grid_matrix.data(), 
		projection.data(), 
		projection_inverse.data(),
		*grid_voxels_params.data()->data(),
		KINECT_V2_DEPTH_WIDTH, 
		KINECT_V2_DEPTH_HEIGHT,
		KINECT_V2_DEPTH_MIN,
		KINECT_V2_DEPTH_MAX,
		vertices.data()[0],
		normals.data()[0],
		image_width,
		image_height,
		*image_data,
		*debug_buffer
		);

	timer.start();
	knt_cuda_allocate();
	knt_cuda_init_grid();
	timer.print_interval("Allocating gpu      : ");

	timer.start();
	knt_cuda_copy_host_to_device();
	knt_cuda_copy_depth_buffer_to_device(knt.depth.data());
	timer.print_interval("Copy host to device : ");

	timer.start();
	knt_cuda_normal_estimation();
	timer.print_interval("Normal estimation   : ");

	timer.start();
	knt_cuda_update_grid(view_matrix.data());
	timer.print_interval("Update grid         : ");
	
	timer.start();
	knt_cuda_grid_params_copy_device_to_host();
	knt_cuda_copy_device_to_host();
	timer.print_interval("Copy device to host : ");

	timer.start();
	knt_cuda_free();
	timer.print_interval("Cleanup gpu         : ");


	timer.start();
	Eigen::Affine3f grid_affine_2 = Eigen::Affine3f::Identity();
	grid_affine_2.translate(Eigen::Vector3f(-half_vol_size, -half_vol_size, -vol_size));
		
	export_volume(
		"../../data/grid_volume_gpu_knt.obj",
		voxel_count,
		voxel_size,
		grid_voxels_params);
		//grid_affine_2.matrix());

	export_obj_with_colors("../../data/knt_grid_frame_normals.obj", vertices, normals);

	timer.print_interval("Exporting volume    : ");

	return 0;
}
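The projection built above is invertible as long as the near and far planes differ and the field of view is valid; a cheap host-side guard before shipping both matrices to the GPU would be (a sketch, not in the original):

#include <cassert>

// Sketch: sanity-check the projection/inverse pair before the CUDA upload.
assert((projection * projection_inverse)
           .isApprox(Eigen::Matrix4f::Identity(), 1e-4f));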
Code example #22
File: segmentation.cpp Project: amelim/7495-Final
void pairAlign(const PointCloud::Ptr cloud_src, const PointCloud::Ptr cloud_tgt, 
    PointCloud::Ptr output, Eigen::Matrix4f &final_transform, bool downsample = false)
{
  PointCloud::Ptr src(new PointCloud);
  PointCloud::Ptr tgt(new PointCloud);
  pcl::VoxelGrid<PointType> grid;
  if(downsample)
  {
    grid.setLeafSize(0.05, 0.05, 0.05);
    grid.setInputCloud(cloud_src);
    grid.filter(*src);

    grid.setInputCloud(cloud_tgt);
    grid.filter(*tgt);
  }
  else
  {
    src = cloud_src;
    tgt = cloud_tgt;
  }

  PointCloudWithNormals::Ptr points_with_normals_src (new PointCloudWithNormals);
  PointCloudWithNormals::Ptr points_with_normals_tgt (new PointCloudWithNormals);

  pcl::NormalEstimation<PointType, PointNormalT> norm_est;
  pcl::search::KdTree<PointType>::Ptr tree(new pcl::search::KdTree<PointType> ());
  norm_est.setSearchMethod(tree);
  norm_est.setKSearch(30);

  norm_est.setInputCloud(src);
  norm_est.compute(*points_with_normals_src);
  pcl::copyPointCloud(*src, *points_with_normals_src);

  norm_est.setInputCloud(tgt);
  norm_est.compute(*points_with_normals_tgt);
  pcl::copyPointCloud(*tgt, *points_with_normals_tgt);

  PointRep point_representation;
  float alpha[4] = {1.0, 1.0, 1.0, 1.0};
  point_representation.setRescaleValues(alpha);

  pcl::IterativeClosestPointNonLinear<PointNormalT, PointNormalT> reg;
  reg.setTransformationEpsilon(1e-5);
  reg.setMaxCorrespondenceDistance(0.15);
  reg.setPointRepresentation(boost::make_shared<const PointRep> (point_representation));

  reg.setInputCloud(points_with_normals_src);
  reg.setInputTarget(points_with_normals_tgt);

  //Optimize
  // prev is compared against on the first pass, so give it a defined value
  Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity(), prev = Eigen::Matrix4f::Identity(), targetToSource;
  PointCloudWithNormals::Ptr reg_result = points_with_normals_src;
  reg.setMaximumIterations(2);

  for(int i = 0; i < 300; ++i)
  {
    points_with_normals_src = reg_result;

    reg.setInputCloud(points_with_normals_src);
    reg.align(*reg_result);

    Ti = reg.getFinalTransformation() * Ti;

    if(fabs ((reg.getLastIncrementalTransformation() - prev).sum()) < 
        reg.getTransformationEpsilon())
      reg.setMaxCorrespondenceDistance(reg.getMaxCorrespondenceDistance() - 0.001);

    prev = reg.getLastIncrementalTransformation();

    //showCloudsRight(points_with_normals_tgt, points_with_normals_src);
    std::cout << "Fitness: " << reg.getFitnessScore() << std::endl;
  }

  targetToSource = Ti.inverse();

  pcl::transformPointCloud(*cloud_tgt, *output, targetToSource);
  p->removePointCloud ("source");
  p->removePointCloud ("target");

  PointCloudColorHandlerCustom<PointType> cloud_tgt_h (output, 0, 255, 0);
  PointCloudColorHandlerCustom<PointType> cloud_src_h (cloud_src, 255, 0, 0);
  p->addPointCloud (output, cloud_tgt_h, "target", vp_2);
  p->addPointCloud (cloud_src, cloud_src_h, "source", vp_2);

  PCL_INFO ("Press q to continue the registration.\n");
  p->spinOnce ();

  *output += *cloud_src;
  final_transform = targetToSource;
}
Code example #23
File: GlobalModel.cpp Project: bigjun/ElasticFusion
void GlobalModel::clean(const Eigen::Matrix4f & pose,
                        const int & time,
                        GPUTexture * indexMap,
                        GPUTexture * vertConfMap,
                        GPUTexture * colorTimeMap,
                        GPUTexture * normRadMap,
                        GPUTexture * depthMap,
                        const float confThreshold,
                        std::vector<float> & graph,
                        const int timeDelta,
                        const float maxDepth,
                        const bool isFern)
{
    assert(graph.size() / 16 < MAX_NODES);

    if(graph.size() > 0)
    {
        //Can be optimised by only uploading new nodes with offset
        glBindTexture(GL_TEXTURE_2D, deformationNodes.texture->tid);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, graph.size(), 1, GL_LUMINANCE, GL_FLOAT, graph.data());
    }

    TICK("Fuse::Copy");
    //Next we copy the new unstable vertices from the newUnstableFid transform feedback into the global map
    unstableProgram->Bind();
    unstableProgram->setUniform(Uniform("time", time));
    unstableProgram->setUniform(Uniform("confThreshold", confThreshold));
    unstableProgram->setUniform(Uniform("scale", (float)IndexMap::FACTOR));
    unstableProgram->setUniform(Uniform("indexSampler", 0));
    unstableProgram->setUniform(Uniform("vertConfSampler", 1));
    unstableProgram->setUniform(Uniform("colorTimeSampler", 2));
    unstableProgram->setUniform(Uniform("normRadSampler", 3));
    unstableProgram->setUniform(Uniform("nodeSampler", 4));
    unstableProgram->setUniform(Uniform("depthSampler", 5));
    unstableProgram->setUniform(Uniform("nodes", (float)(graph.size() / 16)));
    unstableProgram->setUniform(Uniform("nodeCols", (float)NODE_TEXTURE_DIMENSION));
    unstableProgram->setUniform(Uniform("timeDelta", timeDelta));
    unstableProgram->setUniform(Uniform("maxDepth", maxDepth));
    unstableProgram->setUniform(Uniform("isFern", (int)isFern));

    Eigen::Matrix4f t_inv = pose.inverse();
    unstableProgram->setUniform(Uniform("t_inv", t_inv));

    unstableProgram->setUniform(Uniform("cam", Eigen::Vector4f(Intrinsics::getInstance().cx(),
                                                         Intrinsics::getInstance().cy(),
                                                         Intrinsics::getInstance().fx(),
                                                         Intrinsics::getInstance().fy())));
    unstableProgram->setUniform(Uniform("cols", (float)Resolution::getInstance().cols()));
    unstableProgram->setUniform(Uniform("rows", (float)Resolution::getInstance().rows()));

    glBindBuffer(GL_ARRAY_BUFFER, vbos[target].first);

    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, 0);

    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f)));

    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 2));

    glEnable(GL_RASTERIZER_DISCARD);

    glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, vbos[renderSource].second);

    glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, vbos[renderSource].first);

    glBeginTransformFeedback(GL_POINTS);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, indexMap->texture->tid);

    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, vertConfMap->texture->tid);

    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, colorTimeMap->texture->tid);

    glActiveTexture(GL_TEXTURE3);
    glBindTexture(GL_TEXTURE_2D, normRadMap->texture->tid);

    glActiveTexture(GL_TEXTURE4);
    glBindTexture(GL_TEXTURE_2D, deformationNodes.texture->tid);

    glActiveTexture(GL_TEXTURE5);
    glBindTexture(GL_TEXTURE_2D, depthMap->texture->tid);

    glBeginQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN, countQuery);

    glDrawTransformFeedback(GL_POINTS, vbos[target].second);

    glBindBuffer(GL_ARRAY_BUFFER, newUnstableVbo);

    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, 0);

    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f)));

    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 2));

    glDrawTransformFeedback(GL_POINTS, newUnstableFid);

    glEndQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN);

    glGetQueryObjectuiv(countQuery, GL_QUERY_RESULT, &count);

    glEndTransformFeedback();

    glDisable(GL_RASTERIZER_DISCARD);

    glBindTexture(GL_TEXTURE_2D, 0);
    glActiveTexture(GL_TEXTURE0);

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);

    unstableProgram->Unbind();

    std::swap(target, renderSource);

    glFinish();
    TOCK("Fuse::Copy");
}
Code example #24
File: icp_test.cpp Project: dejanpan/seie2011fall
int main (int argc, char** argv)
{
  PointCloudPtr cloud_source (new PointCloud);
  PointCloudPtr cloud_target (new PointCloud);
  PointCloudPtr featureCloudSourceTemp (new PointCloud);
  PointCloudPtr featureCloudTargetTemp (new PointCloud);
  PointCloudPtr cloud_converg_sparse_all (new PointCloud);
  PointCloudPtr cloud_converg_sparse_correspond (new PointCloud);
  PointCloudPtr cloud_target_sparse_correspond (new PointCloud);
  PointCloudPtr cloud_converg_dense (new PointCloud);
  PointCloudPtr cloud_ransac_estimation (new PointCloud);
  PointCloudNormalPtr cloud_source_normals (new PointCloudNormal);
  PointCloudNormalPtr cloud_target_normals (new PointCloudNormal);
  PointCloudNormalPtr featureCloudSource (new PointCloudNormal);
  PointCloudNormalPtr featureCloudTarget (new PointCloudNormal);
  Eigen::Matrix4f initialTransform;
  std::vector<int> indicesSource;
  std::vector<int> indicesTarget;

  //Fill in the cloud data
  //pcl::PCDReader reader;
  //ROS_INFO("Reading saved clouds with normals from file (faster)");
  //reader.read ("normals-source.pcd", *cloud_source_normals);
  //reader.read ("normals-target.pcd", *cloud_target_normals);
  //std::cout << "PointCloud source has: " << cloud_source->points.size () << " data points." << std::endl;
  //std::cout << "PointCloud target has: " << cloud_target->points.size () << " data points." << std::endl;


  ROS_INFO("Getting test data from a bag file");
  getTestDataFromBag(cloud_source, cloud_target, featureCloudSourceTemp, indicesSource, featureCloudTargetTemp, indicesTarget, initialTransform, 0);
  Eigen::Matrix4f ransacInverse = initialTransform.inverse();
/*
  // remove correspondences with large z values (susceptible to error)
  ROS_INFO("Removing feature correspondences with a large depth measurement");
  //removeCorrespondancesZThreshold (featureCloudSourceTemp, indicesSource, featureCloudTargetTemp, indicesTarget, 1.3);
  ROS_INFO_STREAM("indices source size: " << indicesSource.size() << "   indices target size: " << indicesTarget.size());

  //calculate normals
  ROS_INFO("Calcualting normals");
 normalEstimation(cloud_source, cloud_source_normals);
 normalEstimation(cloud_target, cloud_target_normals);

  ROS_INFO("Converting feature point clouds");
  pcl::copyPointCloud (*featureCloudSourceTemp, *featureCloudSource);
  pcl::copyPointCloud (*featureCloudTargetTemp, *featureCloudTarget);

  // here is a guess transform that was manually set to align point clouds, pure icp performs well with this
  PointCloud Final;
  Eigen::Matrix4f guess;
  guess <<   1, 0, 0, 0.07,
		     0, 1, 0, 0,
		     0, 0, 1, 0.015,
		     0, 0, 0, 1;

  ROS_INFO("Setting up icp with features");
  */
  /* custom icp
  pcl::IterativeClosestPointFeatures<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal> icp_features;
  icp_features.setMaximumIterations (40);
  icp_features.setTransformationEpsilon (1e-9);
  icp_features.setMaxCorrespondenceDistance(0.1);
  icp_features.setRANSACOutlierRejectionThreshold(0.03);

  icp_features.setInputCloud(cloud_source_normals);
  icp_features.setInputTarget(cloud_target_normals);
  icp_features.setSourceFeatures (featureCloudSource, indicesSource);
  icp_features.setTargetFeatures (featureCloudTarget, indicesTarget);
  icp_features.setFeatureErrorWeight(0.3);  // 1 = feature, 0 = icp

  ROS_INFO("Performing rgbd icp.....");
  icp_features.align(Final, ransacInverse);
  std::cout << "ICP features has finished with converge flag of:" << icp_features.hasConverged() << " score: " <<
		  icp_features.getFitnessScore() << std::endl;
  std::cout << icp_features.getFinalTransformation() << std::endl;
*/
/*  std::vector<int> indicesSourceSmall = indicesSource;
  std::vector<int> indicesTargetSmall = indicesTarget;

  for(std::vector<int>::iterator iterator_ = indicesSource.begin(); iterator_ != indicesSource.end(); ++iterator_) {
  	*iterator_ = *iterator_ + cloud_source->size();
  }
  for(std::vector<int>::iterator iterator_ = indicesTarget.begin(); iterator_ != indicesTarget.end(); ++iterator_) {
      *iterator_ = *iterator_ + cloud_target->size();
  }

  PointCloudNormalPtr concatinatedSourceCloud (new PointCloudNormal);
  PointCloudNormalPtr concatinatedTargetCloud (new PointCloudNormal);

  *concatinatedSourceCloud = *cloud_source_normals;
  *concatinatedTargetCloud = *cloud_target_normals;

  (*concatinatedSourceCloud) += *featureCloudSource;
  (*concatinatedTargetCloud) += *featureCloudTarget;

  boost::shared_ptr< TransformationEstimationWDF<pcl::PointXYZRGBNormal,pcl::PointXYZRGBNormal> >
  		initialTransformWDF(new TransformationEstimationWDF<pcl::PointXYZRGBNormal,pcl::PointXYZRGBNormal>());

  float alpha = 1.0;
  initialTransformWDF->setAlpha(alpha);
  initialTransformWDF->setCorrespondecesDFP(indicesSource, indicesTarget);

  // Instantiate ICP
  pcl::IterativeClosestPoint<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal> icp_wdf;

  // Set the max correspondence distance to 5cm (e.g., correspondences with higher distances will be ignored)
  icp_wdf.setMaxCorrespondenceDistance (0.05);
  // Set the maximum number of iterations (criterion 1)
  icp_wdf.setMaximumIterations (75);
  // Set the transformation epsilon (criterion 2)
  icp_wdf.setTransformationEpsilon (1e-8);
  // Set the euclidean distance difference epsilon (criterion 3)
  icp_wdf.setEuclideanFitnessEpsilon (0); //1

  // Set TransformationEstimationWDF as ICP transform estimator
  icp_wdf.setTransformationEstimation (initialTransformWDF);

  icp_wdf.setInputCloud( concatinatedSourceCloud);
  icp_wdf.setInputTarget( concatinatedTargetCloud);

  pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr cloud_transformed( new pcl::PointCloud<pcl::PointXYZRGBNormal>);
  // As before, due to my initial bad naming, it is the "target" that is being transformed
  //									set initial transform
  icp_wdf.align ( *cloud_transformed, ransacInverse); //init_tr );
  std::cout << "[SIIMCloudMatch::runICPMatch] Has converged? = " << icp_wdf.hasConverged() << std::endl <<
				"	fitness score (SSD): " << icp_wdf.getFitnessScore (1000) << std::endl;
  icp_wdf.getFinalTransformation ();

  /// Final ICP transformation is obtained by multiplying initial transformed with icp refinement
*/
/*------BEST-------------
icp.getMaximumIterations 50
icp.getRANSACOutlierRejectionThreshold() 0.02
icp.getMaxCorrespondenceDistance() 0.03
icp.getTransformationEpsilon () 1e-09
icp.getEuclideanFitnessEpsilon () -1.79769e+308
score: 0.000164332
  ---------------------
  //Normal (non modified) icp for reference
  pcl::IterativeClosestPoint<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal> icp;
    icp.setMaximumIterations (20);
    std::cerr << "icp.getMaximumIterations " << icp.getMaximumIterations() << std::endl;

    icp.setRANSACOutlierRejectionThreshold(0.05);
    std::cerr << "icp.getRANSACOutlierRejectionThreshold() " << icp.getRANSACOutlierRejectionThreshold() << std::endl;

    icp.setMaxCorrespondenceDistance(0.05);
    std::cerr << "icp.getMaxCorrespondenceDistance() " << icp.getMaxCorrespondenceDistance() << std::endl;

    //only used for convergence test
    icp.setTransformationEpsilon (0);
    std::cerr << "icp.getTransformationEpsilon () " << icp.getTransformationEpsilon () << std::endl;

    //only used for convergence test
    std::cerr << "icp.getEuclideanFitnessEpsilon () " << icp.getEuclideanFitnessEpsilon () << std::endl;

    icp.setInputCloud(featureCloudSource);
    icp.setInputTarget(featureCloudTarget);
    pcl::PointCloud<pcl::PointXYZRGBNormal> Final_reference;

    std::cout << "ICP has starts with a score of" << icp.getFitnessScore() << std::endl;

    ROS_INFO("Performing standard icp.....");
    icp.align(Final_reference);//, guess);
    std::cout << "ICP has finished with converge flag of:" << icp.hasConverged() << " score: " << icp.getFitnessScore() << std::endl;
      std::cout << icp.getFinalTransformation() << std::endl;
*/
/*  ROS_INFO("Writing output clouds...");
  transformPointCloud (*featureCloudSourceTemp, *cloud_converg_sparse_all, icp_wdf.getFinalTransformation());
  transformPointCloud (*featureCloudSourceTemp, *cloud_converg_sparse_correspond, icp_wdf.getFinalTransformation());
  transformPointCloud (*cloud_source, *cloud_converg_dense, icp_wdf.getFinalTransformation());
  transformPointCloud (*cloud_source, *cloud_ransac_estimation, ransacInverse);

  // remove non correspondences from feature clouds as an addition output
  uint8_t col=3;
  // *cloud_converg_sparse_correspond = *cloud_converg_sparse_all;
  cloud_converg_sparse_correspond->points.resize(indicesSourceSmall.size());
  cloud_converg_sparse_correspond->height = 1;
  cloud_converg_sparse_correspond->width = (int)indicesSourceSmall.size();

  cloud_target_sparse_correspond->points.resize(indicesTargetSmall.size());
  cloud_target_sparse_correspond->height = 1;
  cloud_target_sparse_correspond->width = (int)indicesTargetSmall.size();
  for (size_t cloudId = 0; cloudId < indicesSourceSmall.size(); ++cloudId)
  {
	  cloud_converg_sparse_correspond->points[cloudId].x = cloud_converg_sparse_all->points[indicesSourceSmall[cloudId]].x;
	  cloud_converg_sparse_correspond->points[cloudId].y = cloud_converg_sparse_all->points[indicesSourceSmall[cloudId]].y;
	  cloud_converg_sparse_correspond->points[cloudId].z = cloud_converg_sparse_all->points[indicesSourceSmall[cloudId]].z;
	  cloud_converg_sparse_correspond->points[cloudId].r = col;
	  cloud_converg_sparse_correspond->points[cloudId].g = col;
	  cloud_converg_sparse_correspond->points[cloudId].b = col;
	  cloud_target_sparse_correspond->points[cloudId].x = featureCloudTarget->points[indicesTargetSmall[cloudId]].x;
	  cloud_target_sparse_correspond->points[cloudId].y = featureCloudTarget->points[indicesTargetSmall[cloudId]].y;
	  cloud_target_sparse_correspond->points[cloudId].z = featureCloudTarget->points[indicesTargetSmall[cloudId]].z;
	  cloud_target_sparse_correspond->points[cloudId].r = col;
	  cloud_target_sparse_correspond->points[cloudId].g = col;
	  cloud_target_sparse_correspond->points[cloudId].b = col;
	  //std::cerr << "point " << cloudId << "\n ";
  }

  pcl::PCDWriter writer;
  writer.write ("cloud1-out.pcd", *cloud_source, false);
  writer.write ("cloud2-out.pcd", *cloud_target, false);
  writer.write ("normals-source.pcd", *cloud_source_normals, false);
  writer.write ("normals-target.pcd", *cloud_source_normals, false);
  writer.write ("feature-source.pcd", *featureCloudSource, false);
  writer.write ("feature-target.pcd", *featureCloudTarget, false);
  writer.write ("converged-cloud.pcd", *cloud_converg_dense, false);
  writer.write ("converged-feature-all.pcd", *cloud_converg_sparse_all, false);
  writer.write ("converged-feature-correspond.pcd", *cloud_converg_sparse_correspond, false);
  writer.write ("target-feature-correspond.pcd", *cloud_target_sparse_correspond, false);
  writer.write ("ransac_estimation.pcd", *cloud_ransac_estimation, false);
 // writer.write ("converged-reference.pcd", Final_reference, false);
*/
 return (0);
}
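
The commented-out block above reduces to one pattern: transform a cloud by the ICP result and write it to disk. A minimal, self-contained sketch of that pattern, assuming PCL's XYZRGB point type (saveAligned and aligned.pcd are illustrative names, not from the project):

#include <pcl/point_types.h>
#include <pcl/common/transforms.h>
#include <pcl/io/pcd_io.h>

void saveAligned(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud,
                 const Eigen::Matrix4f &finalTransform)
{
    pcl::PointCloud<pcl::PointXYZRGB> aligned;
    pcl::transformPointCloud(*cloud, aligned, finalTransform); // apply the ICP result
    pcl::PCDWriter writer;
    writer.write("aligned.pcd", aligned, false);               // false = ASCII PCD
}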
Code example #25
File: KFsphere_SLAM.cpp  Project: EduFdez/rgbd360
    void run(string path, const int &selectSample)
    {
        std::cout << "SphereGraphSLAM::run... " << std::endl;

        //      // Create the topological arrangement object
        //      TopologicalMap360 topologicalMap(Map);
        //      Map.currentArea = 0;

        //      // Get the calibration matrices for the different sensors
        //      Calib360 calib;
        //      calib.loadExtrinsicCalibration();
        //      calib.loadIntrinsicCalibration();

        //      // Create registration object
        //      RegisterRGBD360 registerer(mrpt::format("%s/config_files/configLocaliser_sphericalOdometry.ini", PROJECT_SOURCE_PATH));

        // Filter the point clouds before visualization
        FilterPointCloud filter;

        int frame = 282; // Always skip the first frame, which sometimes contains errors
        int frameOrder = 0;

        Eigen::Matrix4f currentPose = Eigen::Matrix4f::Identity();

        string fileName = path + mrpt::format("/sphere_images_%d.bin",frame);

        // Load first frame
        Frame360* frame360 = new Frame360(&calib);
        frame360->loadFrame(fileName);
        frame360->undistort();
        frame360->stitchSphericalImage();
        frame360->buildSphereCloud();
        frame360->getPlanes();
        //frame360->id = frame;
        frame360->id = frameOrder;
        frame360->node = Map.currentArea;

        nearestKF = frameOrder;

        filter.filterEuclidean(frame360->sphereCloud);

        Map.addKeyframe(frame360,currentPose);
//        Map.vOptimizedPoses.push_back( currentPose );
        Map.vSelectedKFs.push_back(0);
        Map.vTrajectoryIncrements.push_back(0);

        // Topological Partitioning
        Map.vsAreas.push_back( std::set<unsigned>() );
        Map.vsAreas[Map.currentArea].insert(frameOrder);
        Map.vsNeighborAreas.push_back( std::set<unsigned>() );	// Vector with numbers of neighbor areas (topological nodes)
        Map.vsNeighborAreas[Map.currentArea].insert(Map.currentArea);
        topologicalMap.vSSO.push_back( mrpt::math::CMatrix(1,1) );
        topologicalMap.vSSO[Map.currentArea](frameOrder,frameOrder) = 0.0;

        frame += selectSample;
        fileName = path + mrpt::format("/sphere_images_%d.bin",frame);

        // Start visualizer
        Map360_Visualizer Viewer(Map,1);

        // Graph-SLAM
        //      std::cout << "\t  mmConnectionKFs " << Map.mmConnectionKFs.size() << " \n";
        // LoopClosure360 loopCloser(Map);
        //        float areaMatched, currentPlanarArea;
        //      Frame360 *candidateKF = new Frame360(&calib); // A reference to the last registered frame to add as a keyframe when needed
        //      std::pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> > candidateKF_connection; // The register information of the above
        GraphOptimizer optimizer;
        optimizer.setRigidTransformationType(GraphOptimizer::SixDegreesOfFreedom);
        std::cout << "Added vertex: "<< optimizer.addVertex(currentPose.cast<double>()) << std::endl;
        bool bHasNewLC = false;

        Frame360 *candidateKF = NULL; // A reference to the last registered frame to add as a keyframe when needed
        std::pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> > candidateKF_connection; // The register information of nearestKF
        float candidateKF_sso;

        //      while( true )
        //        boost::this_thread::sleep (boost::posix_time::milliseconds (10));

        bool bPrevKFSelected = true, bGoodTracking = true;
        int lastTrackedKF = 0; // Count the number of not tracked frames after the last registered one

        // Dense RGB-D alignment
        RegisterPhotoICP align360;
        align360.setNumPyr(5);
        align360.useSaliency(false);
        //        align360.setVisualization(true);
        align360.setGrayVariance(3.f/255);
        double selectKF_ICPdist = 0.9;
//        double selectKF_ICPdist = 1.1;
        double thresholdConnections = 2.5; // Distance in meters to evaluate possible connections
        Eigen::Matrix4f rigidTransf_dense = Eigen::Matrix4f::Identity();
        Eigen::Matrix4f rigidTransf_dense_ref = rigidTransf_dense;
        // The reference frames of the spherical image and the point clouds are not the same! I should always use the same coordinate system (TODO)
        float angleOffset = 157.5;
        Eigen::Matrix4f rotOffset = Eigen::Matrix4f::Identity(); rotOffset(1,1) = rotOffset(2,2) = cos(angleOffset*PI/180); rotOffset(1,2) = sin(angleOffset*PI/180); rotOffset(2,1) = -rotOffset(1,2);

        while( fexists(fileName.c_str()) )
        {
            // Load pointCloud
            //        if(!bPrevKFSelected)
            {
                cout << "Frame " << fileName << endl;
                frame360 = new Frame360(&calib);
                frame360->loadFrame(fileName);
                frame360->undistort();
                frame360->stitchSphericalImage();
                frame360->buildSphereCloud();
                frame360->getPlanes();
                frame360->id = frameOrder+1;
            }
            //            cout << " Load Keyframe \n" << endl;

            // The next frame to process
            frame += selectSample;
            fileName = path + mrpt::format("/sphere_images_%d.bin",frame);

            // Register the pair of frames
            bGoodTracking = registerer.RegisterPbMap(Map.vpSpheres[nearestKF], frame360, MAX_MATCH_PLANES, RegisterRGBD360::PLANAR_3DoF);
            Matrix4f trackedPosePbMap = registerer.getPose();
            cout << "bGoodTracking " << bGoodTracking << " with " << nearestKF << ". Distance "
                 << registerer.getPose().block(0,3,3,1).norm() << " entropy " << registerer.calcEntropy() << " matches " << registerer.getMatchedPlanes().size() << " area " << registerer.getAreaMatched() << endl;

            //            // If the registration is good, do not evaluate this KF and go to the next
            //            if( bGoodTracking  && registerer.getMatchedPlanes().size() >= min_planes_registration && registerer.getAreaMatched() > 12 )//&& registerer.getPose().block(0,3,3,1).norm() < 1.5 )
            //                //        if( bGoodTracking && registerer.getMatchedPlanes().size() > 6 && registerer.getPose().block(0,3,3,1).norm() < 1.0 )
            //            {
            //                float dist_odometry = 0; // diff_roatation
            //                if(candidateKF)
            //                    dist_odometry = (candidateKF_connection.first.block(0,3,3,1) - trackedPosePbMap.block(0,3,3,1)).norm();
            //                //          else
            //                //            dist_odometry = trackedPosePbMap.norm();
            //                if( dist_odometry < max_translation_odometry ) // Check that the registration is coherent with the camera movement (i.e. in odometry the registered frames must lay nearby)
            //                {
            //                    if(trackedPosePbMap.block(0,3,3,1).norm() > min_dist_keyframes) // Minimum distance to select a possible KF
            //                    {
            //                        cout << "Set candidateKF \n";
            //                        if(candidateKF)
            //                            delete candidateKF; // Delete previous candidateKF

            //                        candidateKF = frame360;
            //                        candidateKF_connection.first = trackedPosePbMap;
            //                        candidateKF_connection.second = registerer.getInfoMat();
            //                        candidateKF_sso = registerer.getAreaMatched() / registerer.areaSource;
            //                    }

            //                    { boost::mutex::scoped_lock updateLockVisualizer(Viewer.visualizationMutex);
            //                        Viewer.currentLocation.matrix() = (currentPose * trackedPosePbMap);
            //                        Viewer.bDrawCurrentLocation = true;
            //                    }

            //                    bPrevKFSelected = false;
            //                    lastTrackedKF = 0;
            //                }

            //                continue; // Eval next frame
            //            }

            if(bGoodTracking && registerer.getMatchedPlanes().size() >= 6 && registerer.getAreaMatched() > 12)
            {
                delete frame360;
                rigidTransf_dense_ref = rotOffset * trackedPosePbMap * rotOffset.inverse();

                { boost::mutex::scoped_lock updateLockVisualizer(Viewer.visualizationMutex);
                    Viewer.currentLocation.matrix() = (currentPose * rigidTransf_dense);
                    Viewer.bDrawCurrentLocation = true;
                }

                cout << " skip frame PbMap " << endl;
                continue;
            }

            cout << "Align Spheres " << frame360->id << " and " << Map.vpSpheres[nearestKF]->id << endl;
            double time_start_dense = pcl::getTime();
            align360.setTargetFrame(Map.vpSpheres[nearestKF]->sphereRGB, Map.vpSpheres[nearestKF]->sphereDepth);
            align360.setSourceFrame(frame360->sphereRGB, frame360->sphereDepth);
//            rigidTransf_dense_ref = rotOffset * registerer.getPose() * rotOffset.inverse();
            align360.alignFrames360(rigidTransf_dense_ref, RegisterPhotoICP::PHOTO_DEPTH); // PHOTO_CONSISTENCY / DEPTH_CONSISTENCY / PHOTO_DEPTH
            Eigen::Matrix4f rigidTransf_dense_ref_prev = rigidTransf_dense_ref;
            rigidTransf_dense_ref = align360.getOptimalPose();
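            // Change of basis: align360 optimizes in the spherical-image frame, so the
            // result is conjugated by rotOffset to express it in the point-cloud frame.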
            rigidTransf_dense = rotOffset.inverse() * rigidTransf_dense_ref * rotOffset;
            double time_end_dense = pcl::getTime();
            //std::cout << "Dense " << (time_end_dense - time_start_dense) << std::endl;
            double depth_residual = align360.avDepthResidual;
            cout << " Residuals: " << align360.avPhotoResidual << " " << align360.avDepthResidual;
            //cout << " regist \n" << rigidTransf_dense << endl;

            { boost::mutex::scoped_lock updateLockVisualizer(Viewer.visualizationMutex);
                Viewer.currentLocation.matrix() = (currentPose * rigidTransf_dense);
                Viewer.bDrawCurrentLocation = true;
            }

            if(align360.avDepthResidual < selectKF_ICPdist && isOdometryContinuousMotion(rigidTransf_dense_ref_prev, rigidTransf_dense_ref, 0.2))
            {
//                assert(align360.avDepthResidual < 1.5);

                delete frame360;
                //                bPrevKFSelected = false;
                cout << " skip frame " << endl;
                continue;
            }

            // The last frame is a candidate for a KF
            candidateKF = frame360;
            candidateKF_connection.first = rigidTransf_dense;
            candidateKF_connection.second = align360.getHessian();
            candidateKF_sso = align360.SSO;

            // Check registration with nearby keyframes
            vector<connection> vConnections;
            unsigned kf;
            for( kf=0; kf < Map.vpSpheres.size(); kf++)
                //                if(Map.vpSpheres[kf]->node == Map.currentArea && kf != nearestKF) // || Map.vsAreas[Map.currentArea].count(kf))
                if((Map.vpSpheres[kf]->node == Map.currentArea || isInNeighbourSubmap(Map.currentArea,kf)) && kf != nearestKF)
                {
                    Eigen::Matrix4f relativePose = Map.vpSpheres[kf]->pose.inverse() * currentPose * rigidTransf_dense;
                    if(relativePose.block(0,3,3,1).norm() < thresholdConnections) // If the KFs are closeby
                    {
//                        std::cout << "Check loop closure between " << kf << " the current frame " << "\n";
//                        bool bGoodRegistration = registerer.RegisterPbMap(Map.vpSpheres[kf], frame360, 25, RegisterRGBD360::PLANAR_3DoF);
//                        if(bGoodRegistration && registerer.getMatchedPlanes().size() > 5 && registerer.getAreaMatched() > 15.0)
                        {
//                            std::cout << "Loop closed between " << newFrameID << " and " << compareLocalIdx << " matchedArea " << registerer.getAreaMatched() << std::endl;
                            //                Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic> informationMatrix = registerer.getAreaMatched() * registerer.getInfoMat().cast<double>();
//                            relativePose = registerer.getPose();
//                            Eigen::Matrix4f initRigidTransfDense = rotOffset * relativePose * rotOffset.inverse();

                            align360.setTargetFrame(Map.vpSpheres[kf]->sphereRGB, Map.vpSpheres[kf]->sphereDepth);
                            align360.setSourceFrame(frame360->sphereRGB, frame360->sphereDepth);
                            Eigen::Matrix4f initRigidTransfDense = rotOffset * relativePose * rotOffset.inverse();
                            align360.alignFrames360(initRigidTransfDense, RegisterPhotoICP::PHOTO_DEPTH); // PHOTO_CONSISTENCY / DEPTH_CONSISTENCY / PHOTO_DEPTH
                            rigidTransf_dense_ref_prev = rigidTransf_dense_ref;
                            rigidTransf_dense_ref = align360.getOptimalPose();
                            if(isOdometryContinuousMotion(rigidTransf_dense_ref_prev, rigidTransf_dense_ref, 0.2))
                                continue;
                            Eigen::Matrix4f rigidTransf_denseKF = rotOffset.inverse() * rigidTransf_dense_ref * rotOffset;

                            cout << "CHECK WITH " << kf << " nearestKF " << nearestKF << " avDepthResidual " << align360.avDepthResidual << " sso " << align360.SSO << endl;
                            //                        cout << "dist " << (Map.vpSpheres[kf]->pose.block(0,3,3,1)-frame360->pose.block(0,3,3,1)).norm() << " " << (initRigidTransfDense.block(0,3,3,1)).norm() << endl;

                            if(align360.avDepthResidual < selectKF_ICPdist)
                                break;
                            else if(align360.avDepthResidual < 1.8) // Keep connection
                            {
                                connection Connection;
                                Connection.KF_id = kf;
                                Connection.geomConnection = pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> >(rigidTransf_denseKF, align360.getHessian());
                                Connection.sso = align360.SSO;
                                vConnections.push_back(Connection);
                            }
                            //else
                            {
                                //std::cout << "Check loop closure between " << kf << " the current frame " << "\n";
                                bool bGoodRegistration = registerer.RegisterPbMap(Map.vpSpheres[kf], frame360, 25, RegisterRGBD360::PLANAR_3DoF);
                                if(bGoodRegistration && registerer.getMatchedPlanes().size() > 5 && registerer.getAreaMatched() > 25.0)
                                {
                                    connection Connection;
                                    Connection.KF_id = kf;
                                    Connection.geomConnection = pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> >(registerer.getPose(), registerer.getAreaMatched() * registerer.getInfoMat());
                                    Connection.sso = align360.SSO;
                                    vConnections.push_back(Connection);
                                }
                            }
                        }
                    }
                }
            if(align360.avDepthResidual < selectKF_ICPdist)
            {
                cout << "\t align360.avDepthResidual " << align360.avDepthResidual << " " << selectKF_ICPdist << " " << kf << endl;
                if(align360.avDepthResidual < depth_residual)
                {
                    currentPose = Map.vpSpheres[kf]->pose;
                    nearestKF = kf;
                    Viewer.currentSphere = nearestKF;
                    cout << "\t nearestKF " << nearestKF << endl;
                }
                delete frame360;
                cout << "skip frame 2 " << endl;
                continue;
            }


            cout << "SSO " << align360.SSO << endl;
            cout << "Select keyframe " << frame360->id << endl;
            cout << "Information \n" << candidateKF_connection.second << endl;
            rigidTransf_dense_ref = Eigen::Matrix4f::Identity();

            ++lastTrackedKF;

            //            if(bPrevKFSelected) // If the previous frame was a keyframe, just try to register the next frame as usually
            //            {
            //                std::cout << "Tracking lost " << lastTrackedKF << "\n";
            //                //          bGoodTracking = true; // Read a new frame in the next iteration
            //                bPrevKFSelected = false;
            //                continue;
            //            }

            //            if(lastTrackedKF > 3)
            //            {
            //                std::cout << "\n  Launch Relocalization \n";
            //                double time_start = pcl::getTime();

            //                if(relocalizer.relocalize(frame360) != -1)
            //                {
            //                    double time_end = pcl::getTime();
            //                    std::cout << "Relocalization in " << Map.vpSpheres.size() << " KFs map took " << double (time_end - time_start) << std::endl;
            //                    lastTrackedKF = 0;
            //                    candidateKF = frame360;
            //                    candidateKF_connection.first = relocalizer.registerer.getPose();
            //                    candidateKF_connection.second = relocalizer.registerer.getInfoMat();
            //                    nearestKF = relocalizer.relocKF;
            //                    currentPose = Map.vTrajectoryPoses[nearestKF]; //Map.vOptimizedPoses
            //                    Viewer.currentSphere = nearestKF;
            //                    std::cout << "Relocalized with " << nearestKF << " \n\n\n";

            //                    continue;
            //                }
            //            }

            //            if(!candidateKF)
            //            {
            //                std::cout << "_Tracking lost " << lastTrackedKF << "\n";
            //                continue;
            //            }

            //            if( !shouldSelectKeyframe(candidateKF, candidateKF_connection, frame360) )
            //            {
            //                cout << "  Do not add a keyframe yet. Now the camera is around KF " << nearestKF << endl;
            //                candidateKF = NULL;
            //                currentPose = Map.vTrajectoryPoses[nearestKF]; //Map.vOptimizedPoses
            //                Viewer.currentSphere = nearestKF;

            //                frame -= selectSample;
            //                fileName = path + mrpt::format("/sphere_images_%d.bin",frame);

            //                //          lastTrackedKF = 0;
            //                //          bGoodTracking = true; // Read a new frame in the next iteration
            //                //          bPrevKFSelected = true; // This avoids selecting a KF for which there is no registration
            //                continue;
            //            }
            //
            //            bPrevKFSelected = true;
            //            frame -= selectSample;
            //            fileName = path + mrpt::format("/sphere_images_%d.bin",frame);

            currentPose = currentPose * candidateKF_connection.first;
            std::cout<< "Added vertex: "<< optimizer.addVertex(currentPose.cast<double>()) << std::endl;
            std::cout<< "Added addEdge:" << nearestKF << " " << Map.vpSpheres.size() << "\n" << candidateKF_connection.first.cast<double>() << "\n";
            optimizer.addEdge(nearestKF, Map.vpSpheres.size(), candidateKF_connection.first.cast<double>(), candidateKF_connection.second.cast<double>());

            std::cout<< " Edge PbMap: " << bGoodTracking << " " << registerer.getMatchedPlanes().size() << " " << registerer.getAreaMatched() << " " << diffRotation(trackedPosePbMap, rigidTransf_dense) << " " << difTranslation(trackedPosePbMap, rigidTransf_dense) << "\n";
            if(bGoodTracking && registerer.getMatchedPlanes().size() >= 4 && registerer.getAreaMatched() > 6)
                if(diffRotation(trackedPosePbMap, rigidTransf_dense) < 5 && difTranslation(trackedPosePbMap, rigidTransf_dense) < 0.1) // If the difference is less than 5º and 10 cm, add the PbMap connection
                {
                    optimizer.addEdge(nearestKF, Map.vpSpheres.size(), trackedPosePbMap.cast<double>(), registerer.getInfoMat().cast<double>());
                    std::cout<< "Added addEdge PbMap\n";
                    std::cout<< "Edge PbMap\n" << registerer.getInfoMat() << endl;
                    std::cout<< "Edge Dense" << candidateKF_connection.second << endl;
                }

            Map.vTrajectoryIncrements.push_back(Map.vTrajectoryIncrements.back() + candidateKF_connection.first.block(0,3,3,1).norm());
            // Filter cloud
            filter.filterEuclidean(candidateKF->sphereCloud);
            std::cout << "filterEuclidean:\n";

            cout << "\tGet previous frame as a keyframe " << frameOrder+1 << " dist " << candidateKF_connection.first.norm() << " candidateKF_sso " << candidateKF_sso << endl;
            // Add new keyframe
            candidateKF->id = ++frameOrder;
            candidateKF->node = Map.currentArea;

            // Update topologicalMap
            int newLocalFrameID = Map.vsAreas[Map.currentArea].size();
            topologicalMap.addKeyframe(Map.currentArea);
            std::cout<< "newSizeLocalSSO "<< newLocalFrameID+1 << std::endl;
//            int newSizeLocalSSO = newLocalFrameID+1;
//            topologicalMap.vSSO[Map.currentArea].setSize(newSizeLocalSSO,newSizeLocalSSO);
//            topologicalMap.vSSO[Map.currentArea](newLocalFrameID,newLocalFrameID) = 0.0;
//            // Re-adjust size of adjacency matrices
//            for(set<unsigned>::iterator it=Map.vsNeighborAreas[Map.currentArea].begin(); it != Map.vsNeighborAreas[Map.currentArea].end(); it++)
//            {
//                assert(*it != Map.currentArea);

//                if(*it < Map.currentArea)
//                {
//                    cout << " sizeSSO neig " << topologicalMap.mmNeigSSO[*it][Map.currentArea].getRowCount() << endl;
//                    topologicalMap.mmNeigSSO[*it][Map.currentArea].setSize(5, newSizeLocalSSO);
//                }
//                else
//                {
//                    topologicalMap.mmNeigSSO[Map.currentArea][*it].setSize(newSizeLocalSSO, topologicalMap.mmNeigSSO[*it][Map.currentArea].getRowCount());
//                }
//            }

            // Track the current position. TODO: (detect inconsistencies)
            int compareLocalIdx = newLocalFrameID-1;
            std::cout<< "compareLocalIdx "<< compareLocalIdx << std::endl;
            // Lock map to add a new keyframe
            {boost::mutex::scoped_lock updateLock(Map.mapMutex);
                Map.addKeyframe(candidateKF, currentPose);
                //Map.vOptimizedPoses.push_back( Map.vOptimizedPoses[nearestKF] * candidateKF_connection.first );
                std::cout << "\t  mmConnectionKFs loop " << frameOrder << " " << nearestKF << " \n";
                Map.mmConnectionKFs[frameOrder] = std::map<unsigned, std::pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> > >();
                Map.mmConnectionKFs[frameOrder][nearestKF] = std::pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> >(candidateKF_connection.first, candidateKF_connection.second);
                Map.vsAreas[Map.currentArea].insert(frameOrder);
            }

            // Update candidateKF
            cout << "Update candidateKF " << endl;
            candidateKF = NULL;
            nearestKF = frameOrder;
            Viewer.currentSphere = nearestKF;
//            Viewer.bFreezeFrame = false;

            cout << "Add tracking SSO " << endl;
            if((topologicalMap.vSSO[Map.currentArea].getRowCount() <= newLocalFrameID) && (topologicalMap.vSSO[Map.currentArea].getRowCount() <= compareLocalIdx))
                cout << "\n\n\t ERROR vSSO\n";

            topologicalMap.vSSO[Map.currentArea](newLocalFrameID,compareLocalIdx) =
                    topologicalMap.vSSO[Map.currentArea](compareLocalIdx,newLocalFrameID) = candidateKF_sso;
            cout << "SSO is " << topologicalMap.vSSO[Map.currentArea](newLocalFrameID,compareLocalIdx) << endl;

            // Add for Map.mmConnectionKFs in the previous frames.
            if(vConnections.size() > 0)// && frameOrder !=3  && frameOrder !=4)
            {
                bHasNewLC = true;
                cout << "Add vConnections " << vConnections.size() << endl;
                for(unsigned link=0; link < vConnections.size(); link++)
                {
//                    cout << "Add vConnections between " << frameOrder << " " << vConnections[link].KF_id << "\n";
//                    cout << vConnections[link].KF_id << " sso " << vConnections[link].sso << "\n";
                    Map.mmConnectionKFs[frameOrder][vConnections[link].KF_id] = vConnections[link].geomConnection;
//                    cout << "Add vConnections_ A\n";
                    topologicalMap.addConnection(unsigned(frameOrder), vConnections[link].KF_id, vConnections[link].sso);
//                    cout << "Add vConnections_ B\n";
                    optimizer.addEdge(vConnections[link].KF_id, unsigned(frameOrder), vConnections[link].geomConnection.first.cast<double>(), vConnections[link].geomConnection.second.cast<double>());
//                cout << "frameOrder " << frameOrder << " " << Map.vpSpheres.size() << endl;
                }
            }

            //        // Calculate distance of furthest registration (approximated, we do not know if the last frame compared is actually the furthest)
            //        std::map<unsigned, pair<unsigned,Eigen::Matrix4f> >::iterator itFurthest = Map.mmConnectionKFs[frameOrder].begin(); // Approximated
            //        cout << "Furthest distance is " << itFurthest->second.second.block(0,3,3,1).norm() << " diffFrameNum " << newLocalFrameID - itFurthest->first << endl;


            //            // Synchronize topologicalMap with loopCloser
            //            while( !loopCloser.connectionsLC.empty() )
            //            {
            //              std::map<unsigned, std::map<unsigned, float> >::iterator it_connectLC1 = loopCloser.connectionsLC.begin();
            //              while( !it_connectLC1->second.empty() )
            //              {
            //                std::map<unsigned, float>::iterator it_connectLC2 = it_connectLC1->second.begin();
            //                if(Map.vpSpheres[it_connectLC1->first]->node != Map.currentArea || Map.vpSpheres[it_connectLC2->first]->node != Map.currentArea)
            //                {
            //                  loopCloser.connectionsLC.erase(it_connectLC1);
            //                  continue;
            //                }
            //                int localID1 = std::distance(Map.vsAreas[Map.currentArea].begin(), Map.vsAreas[Map.currentArea].find(it_connectLC1->first));
            //                int localID2 = std::distance(Map.vsAreas[Map.currentArea].begin(), Map.vsAreas[Map.currentArea].find(it_connectLC2->first));
            //              cout << "Add LC SSO " << topologicalMap.vSSO[Map.currentArea].getRowCount() << " " << localID1 << " " << localID2 << endl;
            //                topologicalMap.vSSO[Map.currentArea](localID1,localID2) = topologicalMap.vSSO[Map.currentArea](localID2,localID1) =
            //                                                                          it_connectLC2->second;
            //                it_connectLC1->second.erase(it_connectLC2);
            //              }
            //              loopCloser.connectionsLC.erase(it_connectLC1);
            //            }

            // Optimize graphSLAM with g2o
            if(bHasNewLC)
            {
                std::cout << "Optimize graph SLAM \n";
                double time_start = pcl::getTime();
                //Optimize the graph
//                for(int j=0; j < Map.vpSpheres.size(); j++)
//                {
////                    std::cout << "optimizedPose " << poseIdx << " submap " << Map.vpSpheres[j]->node << " same? " << Map.vOptimizedPoses[j]==Map.vTrajectoryPoses[j] ? 1 : 0 << std::endl;
//                    g2o::OptimizableGraph::Vertex *vertex_j = optimizer.optimizer.vertex(j);
//                    if(Map.vpSpheres[j]->node == Map.currentArea && j != Map.vSelectedKFs[Map.currentArea])
//                        vertex_j->setFixed(false);
//                    else
//                        vertex_j->setFixed(true);
//                }
                //VertexContainer vertices = optimizer.activeVertices.activeVertices();

                optimizer.optimizeGraph();
                double time_end = pcl::getTime();
                std::cout << " Optimize graph SLAM " << Map.vpSpheres.size() << " took " << double (time_end - time_start) << " s.\n";

//                for(int j=0; j < Map.vpSpheres.size(); j++)
//                    std::cout << "optimizedPose " << j << endl << Map.vOptimizedPoses[j] << endl;

                //Update the optimized poses (for loopClosure detection & visualization)
                {
                    boost::mutex::scoped_lock updateLock(Map.mapMutex);
                    optimizer.getPoses(Map.vOptimizedPoses);
                    //Viewer.currentLocation.matrix() = Map.vOptimizedPoses.back();
                }

//                for(int j=0; j < Map.vpSpheres.size(); j++)
//                    std::cout << "optimizedPose " << j << endl << Map.vOptimizedPoses[j] << endl;

                //Update the optimized pose
                //Eigen::Matrix4f &newPose = Map.vOptimizedPoses.back();
//                std::cout << "First pose optimized \n" << Map.vOptimizedPoses[0] << std::endl;

//                assert(Map.vpSpheres.size() == Map.vOptimizedPoses.size());
                for(int j=0; j < Map.vpSpheres.size(); j++)
                    std::cout << "optimizedPose " << j << " " << optimizer.optimizer.vertex(j)->fixed() << " submap " << Map.vpSpheres[j]->node << std::endl; //<< " same? " << Map.vOptimizedPoses[j]==Map.vTrajectoryPoses[j] ? 1 : 0 << std::endl;
//                std::cout << "newPose \n" << Map.vOptimizedPoses.back() << "\n previous \n" << currentPose << std::endl;

                currentPose = Map.vOptimizedPoses.back();
                bHasNewLC = false;
            }

            // Visibility division (normalized cut)
            if(newLocalFrameID % 4 == 0)
            {
                //                    for(unsigned i=0; i < Map.vTrajectoryPoses.size(); i++)
                //                        cout << "pose " << i << "\n" << Map.vTrajectoryPoses[i] << endl;

                double time_start = pcl::getTime();
                //          cout << "Eval partitions\n";
                topologicalMap.Partitioner();

//                // Set only the current submap's keyframes as optimizable vertex in Graph-SLAM
//                for(int j=0; j < Map.vpSpheres.size(); j++)
//                {
////                    std::cout << "optimizedPose " << poseIdx << " submap " << Map.vpSpheres[j]->node << " same? " << Map.vOptimizedPoses[j]==Map.vTrajectoryPoses[j] ? 1 : 0 << std::endl;
//                    g2o::OptimizableGraph::Vertex *vertex_j = optimizer.optimizer.vertex(j);
//                    if(Map.vpSpheres[j]->node == Map.currentArea && j != Map.vSelectedKFs[Map.currentArea])
//                        vertex_j->setFixed(false);
//                    else
//                        vertex_j->setFixed(true);
//                }

                //                      for(unsigned i=0; i < Map.vTrajectoryPoses.size(); i++)
                //                          cout << "pose " << i << "\n" << Map.vTrajectoryPoses[i] << endl;

                cout << "\tPlaces\n";
                for( unsigned node = 0; node < Map.vsNeighborAreas.size(); node++ )
                {
                    cout << "\t" << node << ":";
                    for( set<unsigned>::iterator it = Map.vsNeighborAreas[node].begin(); it != Map.vsNeighborAreas[node].end(); it++ )
                        cout << " " << *it;
                    cout << endl;
                }

                double time_end = pcl::getTime();
                std::cout << " Eval partitions took " << double (time_end - time_start)*1e3 << " ms" << std::endl; // *1e3 converts s to ms (was *10e3, which over-reported by 10x)
            }

            //if(Map.vsNeighborAreas.size() > 1)
                //mrpt::system::pause();

//            Viewer.bFreezeFrame = false;
        }

        //      while (!Viewer.viewer.wasStopped() )
        //        boost::this_thread::sleep (boost::posix_time::milliseconds (10));

        cout << "Path length " << Map.vTrajectoryIncrements.back() << endl;
    }
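
The pose bookkeeping in run() is plain chaining of relative transforms (currentPose = currentPose * candidateKF_connection.first). A self-contained Eigen sketch of the same arithmetic, with illustrative names:

#include <Eigen/Dense>
#include <vector>

// Chain relative keyframe poses into absolute ones, as run() does incrementally.
std::vector<Eigen::Matrix4f> chainPoses(const std::vector<Eigen::Matrix4f> &relPoses)
{
    std::vector<Eigen::Matrix4f> absPoses;
    Eigen::Matrix4f current = Eigen::Matrix4f::Identity();
    absPoses.push_back(current);   // the first keyframe sits at the origin
    for (const Eigen::Matrix4f &T : relPoses)
    {
        current = current * T;     // accumulate the next relative pose
        absPoses.push_back(current);
    }
    return absPoses;
}

The edges added via optimizer.addEdge() carry exactly these relative poses plus their information matrices; optimizeGraph() then redistributes the loop-closure error over the chain.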
Code example #26
File: KFsphere_SLAM.cpp  Project: EduFdez/rgbd360
    // This function decides whether to select a new keyframe: the candidate is rejected when an existing keyframe is already too near/closely related to it
    bool shouldSelectKeyframe(Frame360 *candidateKF, std::pair<Eigen::Matrix4f, Eigen::Matrix<float,6,6> > candidateKF_connection, Frame360* currentFrame)
    {
        cout << "shouldSelectKeyframe ...\n";
        double time_start = pcl::getTime();

        RegisterPhotoICP align360; // Dense RGB-D alignment
        align360.setNumPyr(5);
        align360.useSaliency(false);
        align360.setVisualization(true);
        align360.setGrayVariance(3.f/255);

        // The reference frames of the spherical image and the point clouds are not the same! I should always use the same coordinate system (TODO)
        float angleOffset = 157.5;
        Eigen::Matrix4f rotOffset = Eigen::Matrix4f::Identity(); rotOffset(1,1) = rotOffset(2,2) = cos(angleOffset*PI/180); rotOffset(1,2) = sin(angleOffset*PI/180); rotOffset(2,1) = -rotOffset(1,2);

        cout << "Align Sphere " << endl;
        double time_start_dense = pcl::getTime();
        align360.setTargetFrame(Map.vpSpheres[nearestKF]->sphereRGB, Map.vpSpheres[nearestKF]->sphereDepth);
        align360.setSourceFrame(candidateKF->sphereRGB, candidateKF->sphereDepth);
        //                align360.alignFrames360(Eigen::Matrix4f::Identity(), RegisterPhotoICP::PHOTO_CONSISTENCY); // PHOTO_CONSISTENCY / DEPTH_CONSISTENCY / PHOTO_DEPTH
        align360.alignFrames360(rotOffset * candidateKF_connection.first * rotOffset.inverse(), RegisterPhotoICP::PHOTO_DEPTH); // PHOTO_CONSISTENCY / DEPTH_CONSISTENCY / PHOTO_DEPTH
        Eigen::Matrix4f rigidTransf_dense = rotOffset.inverse() * align360.getOptimalPose() * rotOffset;
        double time_end_dense = pcl::getTime();
        std::cout << "Dense " << (time_end_dense - time_start_dense) << std::endl;
        cout << "Dense regist \n" << rigidTransf_dense << " \nRegist-PbMap \n" << candidateKF_connection.first << endl;

        //        mrpt::math::CPose3D
        if(!rigidTransf_dense.isApprox(candidateKF_connection.first,1e-1))
        {
            std::cout << "shouldSelectKeyframe INVALID KF\n";
            return false;
        }
        else
        {
            candidateKF_connection.first = rigidTransf_dense;
        }

        double dist_candidateKF = candidateKF_connection.first.block(0,3,3,1).norm();
        Eigen::Matrix4f candidateKF_pose = Map.vTrajectoryPoses[nearestKF] * candidateKF_connection.first;
        map<float,unsigned> nearest_KFs;
        for(std::set<unsigned>::iterator itKF = Map.vsAreas[Map.currentArea].begin(); itKF != Map.vsAreas[Map.currentArea].end(); itKF++) // Set the iterator to the previous KFs not matched yet
        {
            if(*itKF == nearestKF)
                continue;

            double dist_to_prev_KF = (candidateKF_pose.block(0,3,3,1) - Map.vTrajectoryPoses[*itKF].block(0,3,3,1)).norm();
            cout << *itKF << " dist_to_prev_KF " << dist_to_prev_KF << " dist_candidateKF " << dist_candidateKF << endl;
            if( dist_to_prev_KF < dist_candidateKF ) // Check if there are even nearer keyframes
                nearest_KFs[dist_to_prev_KF] = *itKF;
        }
        for(map<float,unsigned>::iterator itNearKF = nearest_KFs.begin(); itNearKF != nearest_KFs.end(); itNearKF++) // Set the iterator to the previous KFs not matched yet
        {
            cout << "itNearKF " << itNearKF->first << " " << itNearKF->second << endl;
            bool bGoodRegistration = registerer.RegisterPbMap(Map.vpSpheres[itNearKF->second], candidateKF, MAX_MATCH_PLANES, RegisterRGBD360::PLANAR_3DoF);
            if(bGoodRegistration && registerer.getMatchedPlanes().size() >= min_planes_registration && registerer.getAreaMatched() > 6 )
            {
                nearestKF = itNearKF->second;
                double time_end = pcl::getTime();
                std::cout << "shouldSelectKeyframe INVALID KF. took " << double (time_end - time_start) << std::endl;
                return false;
            }
        }
        double time_end = pcl::getTime();
        std::cout << "shouldSelectKeyframe VALID KF. took " << double (time_end - time_start) << std::endl;
        //      cout << "shouldSelectKeyframe VALID KF\n";
        return true;
    }
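
Both run() and shouldSelectKeyframe() rebuild the same 157.5-degree rotation offset about the X axis inline; a small helper would remove the duplication. A sketch under that assumption (makeRotOffset is a hypothetical name; M_PI from <cmath> is assumed available):

#include <cmath>
#include <Eigen/Dense>

// Homogeneous rotation of angleDeg degrees about the X axis, matching the
// inline rotOffset construction used in both functions above.
Eigen::Matrix4f makeRotOffset(float angleDeg)
{
    const float a = angleDeg * static_cast<float>(M_PI) / 180.f;
    Eigen::Matrix4f R = Eigen::Matrix4f::Identity();
    R(1,1) = R(2,2) = std::cos(a);
    R(1,2) = std::sin(a);
    R(2,1) = -R(1,2);
    return R;
}

Usage, as in the code above: poses are conjugated as rotOffset * pose * rotOffset.inverse() to move between the point-cloud and spherical-image frames.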
Code example #27
File: icp_test.cpp  Project: dejanpan/seie2011fall
void getTestDataFromBag(PointCloudPtr cloud_source, PointCloudPtr cloud_target,
		PointCloudPtr featureCloudSource, std::vector<int> &indicesSource,
		PointCloudPtr featureCloudTarget, std::vector<int> &indicesTarget,
		Eigen::Matrix4f &initialTransform, int rosMessageNumber)
{
	sensor_msgs::PointCloud2::Ptr cloud_source_filtered (new sensor_msgs::PointCloud2 ());
	sensor_msgs::PointCloud2::Ptr cloud_target_filtered (new sensor_msgs::PointCloud2 ());
	PointCloudPtr cloud_ransac_estimation (new PointCloud);

  	pcl::PCDWriter writer;
  	std::stringstream filename;

	rosbag::Bag bag;
	bag.open("/media/burg/data/bagfiles/bench1-ManySweeps4-lowfeatureThresNode2.bag", rosbag::bagmode::Read);

	std::vector<std::string> topics;
	topics.push_back(std::string("/feature_match_out_topic"));

	rosbag::View view(bag, rosbag::TopicQuery(topics));

	int i = 1;
	BOOST_FOREACH(rosbag::MessageInstance const m, view)
	{
		if(( i == rosMessageNumber) || (rosMessageNumber==0))
		{
			pcl_tutorial::featureMatch::ConstPtr fm = m.instantiate<pcl_tutorial::featureMatch>();

			ROS_INFO("Converting point cloud message to local pcl clouds");

			sensor_msgs::PointCloud2::Ptr cloud_source_temp_Ptr (new sensor_msgs::PointCloud2 (fm->sourcePointcloud));
			sensor_msgs::PointCloud2::Ptr cloud_target_temp_Ptr (new sensor_msgs::PointCloud2 (fm->targetPointcloud));

			voxFilterPointCloud(cloud_source_temp_Ptr, cloud_source_filtered);
			voxFilterPointCloud(cloud_target_temp_Ptr, cloud_target_filtered);
			ROS_INFO("Converting dense clouds");
			pcl::fromROSMsg(*cloud_source_filtered, *cloud_source);
			pcl::fromROSMsg(*cloud_target_filtered, *cloud_target);
			ROS_INFO("Converting sparse clouds");
			pcl::fromROSMsg(fm->sourceFeatureLocations, *featureCloudSource);
			pcl::fromROSMsg(fm->targetFeatureLocations, *featureCloudTarget);

			ROS_INFO("Converting geometry message to eigen4f");
		    tf::Transform trans;
		    tf::transformMsgToTF(fm->featureTransform,trans);
		    initialTransform = EigenfromTf(trans);
		    ROS_INFO_STREAM("transform from ransac: " << "\n"  << initialTransform << "\n");

		    ROS_INFO("Extracting corresponding indices");
		    //int j = 1;
		    indicesSource.clear();
		    indicesTarget.clear();
		  	for(std::vector<pcl_tutorial::match>::const_iterator iterator_ = fm->matches.begin(); iterator_ != fm->matches.end(); ++iterator_)
		  	{
		  		indicesSource.push_back(iterator_->trainId);
		  		indicesTarget.push_back(iterator_->queryId);
		  	//	ROS_INFO_STREAM("source point " << j << ": "   << featureCloudSource->points[iterator_->queryId].x << ", " << featureCloudSource->points[iterator_->queryId].y << ", " << featureCloudSource->points[iterator_->queryId].z);
		  	//	ROS_INFO_STREAM("target point " << j++ << ": " << featureCloudTarget->points[iterator_->trainId].x << ", " << featureCloudTarget->points[iterator_->trainId].y << ", " << featureCloudTarget->points[iterator_->trainId].z);
		  	//	ROS_INFO("qidx: %d tidx: %d iidx: %d dist: %f", iterator_->queryId, iterator_->trainId, iterator_->imgId, iterator_->distance);
		  	}
		    /*for (size_t cloudId = 0; cloudId < featureCloudSource->points.size (); ++cloudId)
		    {
		    	ROS_INFO_STREAM("feature cloud: " << cloudId << ": " << featureCloudSource->points[cloudId].x << "; " << featureCloudSource->points[cloudId].y << "; " << featureCloudSource->points[cloudId].z);
		    }*/

		  	Eigen::Matrix4f ransacInverse = initialTransform.inverse();
		  	transformPointCloud (*cloud_source, *cloud_ransac_estimation, ransacInverse);

		    ROS_INFO("Writing point clouds");
		  	filename << "cloud" << i << "DenseSource.pcd";
		  	writer.write (filename.str(), *cloud_source, false);
		  	filename.str("");
		  	filename << "cloud" << i << "DenseTarget.pcd";
		  	writer.write (filename.str(), *cloud_target, true);
		  	filename.str("");
		  	filename << "cloud" << i << "RANSAC-" << (int)indicesSource.size() << "-inliers.pcd";
		  	writer.write (filename.str(), *cloud_ransac_estimation, true);
		  	filename.str("");
		  	filename << "cloud" << i << "SparseSource.pcd";
		  	writer.write (filename.str(), *featureCloudSource, false);
		  	filename.str("");
		  	filename << "cloud" << i << "SparseTarget.pcd";
		    writer.write (filename.str(), *featureCloudTarget, false);
		    filename.str("");
		  	i++;

		  //  for(std::vector<int>::iterator iterator_ = indicesSource.begin(); iterator_ != indicesSource.end(); ++iterator_) {
		  //  	ROS_INFO("source indice: %d", *iterator_);
		  //  }
		}
		else
			i++;
	}
	bag.close();
}
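
getTestDataFromBag() calls voxFilterPointCloud(), which is not shown in this listing. One plausible implementation in the ROS-era PCL API this code targets (the leaf size is an assumption, not taken from the project):

#include <pcl/filters/voxel_grid.h>
#include <sensor_msgs/PointCloud2.h>

void voxFilterPointCloud(const sensor_msgs::PointCloud2::Ptr &input,
                         sensor_msgs::PointCloud2::Ptr &output)
{
    pcl::VoxelGrid<sensor_msgs::PointCloud2> grid; // downsample directly on the ROS message type
    grid.setLeafSize(0.01f, 0.01f, 0.01f);         // 1 cm voxels (assumed)
    grid.setInputCloud(input);
    grid.filter(*output);
}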
Code example #28
Transform Transform::inverse() const
{
	Eigen::Matrix4f m = util3d::transformToEigen4f(*this);
	return util3d::transformFromEigen4f(m.inverse());
}
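
Since the matrices here are rigid-body transforms, a general 4x4 inverse does more work than necessary; the closed form [R^T | -R^T t] is cheaper and numerically safer. A sketch of that alternative (the standard identity, not necessarily what util3d does internally):

#include <Eigen/Dense>

// Closed-form inverse of a rigid transform [R | t]: [R^T | -R^T t].
Eigen::Matrix4f rigidInverse(const Eigen::Matrix4f &m)
{
    const Eigen::Matrix3f Rt = m.block<3,3>(0,0).transpose(); // R^-1 == R^T for rotations
    Eigen::Matrix4f inv = Eigen::Matrix4f::Identity();
    inv.block<3,3>(0,0) = Rt;
    inv.block<3,1>(0,3) = -Rt * m.block<3,1>(0,3);
    return inv;
}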
Code example #29
/** \brief Align a pair of PointCloud datasets and return the result
  * \param cloud_src the source PointCloud
  * \param cloud_tgt the target PointCloud
  * \param output the resultant aligned source PointCloud
  * \param final_transform the resultant transform between source and target
  */
void pairAlign (const PointCloud::Ptr cloud_src, const PointCloud::Ptr cloud_tgt, PointCloud::Ptr output, Eigen::Matrix4f &final_transform, bool downsample = false)
{
  //
  // Downsample for consistency and speed
  // \note enable this for large datasets
  PointCloud::Ptr src (new PointCloud);
  PointCloud::Ptr tgt (new PointCloud);
  pcl::VoxelGrid<PointT> grid;
  if (downsample)
  {
    grid.setLeafSize (0.05, 0.05, 0.05);
    grid.setInputCloud (cloud_src);
    grid.filter (*src);

    grid.setInputCloud (cloud_tgt);
    grid.filter (*tgt);
  }
  else
  {
    src = cloud_src;
    tgt = cloud_tgt;
  }


  // Compute surface normals and curvature
  PointCloudWithNormals::Ptr points_with_normals_src (new PointCloudWithNormals);
  PointCloudWithNormals::Ptr points_with_normals_tgt (new PointCloudWithNormals);

  pcl::NormalEstimation<PointT, PointNormalT> norm_est;
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ> ());
  norm_est.setSearchMethod (tree);
  norm_est.setKSearch (30);

  norm_est.setInputCloud (src);
  norm_est.compute (*points_with_normals_src);
  pcl::copyPointCloud (*src, *points_with_normals_src);

  norm_est.setInputCloud (tgt);
  norm_est.compute (*points_with_normals_tgt);
  pcl::copyPointCloud (*tgt, *points_with_normals_tgt);

  //
  // Instantiate our custom point representation (defined above) ...
  MyPointRepresentation point_representation;
  // ... and weight the 'curvature' dimension so that it is balanced against x, y, and z
  float alpha[4] = {1.0, 1.0, 1.0, 1.0};
  point_representation.setRescaleValues (alpha);

  //
  // Align
  pcl::IterativeClosestPointNonLinear<PointNormalT, PointNormalT> reg;
  reg.setTransformationEpsilon (1e-6);
  // Set the maximum distance between two correspondences (src<->tgt) to 10cm
  // Note: adjust this based on the size of your datasets
  reg.setMaxCorrespondenceDistance (0.1);
  // Set the point representation
  reg.setPointRepresentation (boost::make_shared<const MyPointRepresentation> (point_representation));

  reg.setInputSource (points_with_normals_src);
  reg.setInputTarget (points_with_normals_tgt);



  //
  // Run the same optimization in a loop and visualize the results
  Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity (), prev = Eigen::Matrix4f::Identity (), targetToSource; // initialize prev: it is read in the first convergence check below
  PointCloudWithNormals::Ptr reg_result = points_with_normals_src;
  reg.setMaximumIterations (2);
  for (int i = 0; i < 30; ++i)
  {
    PCL_INFO ("Iteration Nr. %d.\n", i);

    // save cloud for visualization purpose
    points_with_normals_src = reg_result;

    // Estimate
    reg.setInputSource (points_with_normals_src);
    reg.align (*reg_result);

    //accumulate transformation between each Iteration
    Ti = reg.getFinalTransformation () * Ti;

    //if the difference between this transformation and the previous one
    //is smaller than the threshold, refine the process by reducing
    //the maximal correspondence distance
    if (fabs ((reg.getLastIncrementalTransformation () - prev).sum ()) < reg.getTransformationEpsilon ())
      reg.setMaxCorrespondenceDistance (reg.getMaxCorrespondenceDistance () - 0.001);

    prev = reg.getLastIncrementalTransformation ();

    // visualize current state
    showCloudsRight(points_with_normals_tgt, points_with_normals_src);
  }

  //
  // Get the transformation from target to source
  targetToSource = Ti.inverse();

  //
  // Transform target back in source frame
  pcl::transformPointCloud (*cloud_tgt, *output, targetToSource);

  p->removePointCloud ("source");
  p->removePointCloud ("target");

  PointCloudColorHandlerCustom<PointT> cloud_tgt_h (output, 0, 255, 0);
  PointCloudColorHandlerCustom<PointT> cloud_src_h (cloud_src, 255, 0, 0);
  p->addPointCloud (output, cloud_tgt_h, "target", vp_2);
  p->addPointCloud (cloud_src, cloud_src_h, "source", vp_2);

  PCL_INFO ("Press q to continue the registration.\n");
  p->spin ();

  p->removePointCloud ("source");
  p->removePointCloud ("target");

  //add the source to the transformed target
  *output += *cloud_src;

  final_transform = targetToSource;
 }
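
A usage sketch for pairAlign(), assuming the surrounding tutorial's typedefs (PointCloud = pcl::PointCloud<PointT>) and two clouds already loaded:

PointCloud::Ptr src (new PointCloud), tgt (new PointCloud), merged (new PointCloud);
// ... load or capture src and tgt ...
Eigen::Matrix4f pairTransform;
pairAlign (src, tgt, merged, pairTransform, true); // true = voxel-downsample first
// 'merged' now holds the target transformed into the source frame plus the source
// itself; 'pairTransform' maps target coordinates into the source frame.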
Code example #30
Transformation * UpdateWeightFilterv3::filterTransformation(Transformation * input){
	
	struct timeval start, end;
	gettimeofday(&start, NULL);
	RGBDFrame * src = input->src;
	RGBDFrame * dst = input->dst;
	FrameInput * src_fi = src->input;
	FrameInput * dst_fi = dst->input;

	Transformation * transformation = input->clone();

/*	
	Transformation * transformation = new Transformation();

	transformation->transformationMatrix = input->transformationMatrix;
	transformation->src = src;
	transformation->dst = dst;
	
	transformation->weight = input->weight;//0;
	for(int i = 0; i < input->matches.size();i++){
		KeyPoint * src_kp = input->matches.at(i).first;
		KeyPoint * dst_kp = input->matches.at(i).second;
		transformation->matches.push_back(make_pair(src_kp, dst_kp));
	}
*/

	Eigen::Matrix4f transformationMat  = input->transformationMatrix;
	Eigen::Matrix4f transformationMat_inv = transformationMat.inverse();
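	// The pose and its inverse are used below to cross-validate in both directions:
	// dst validation points are projected into src's frame, and vice versa.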

	float counter_good = 0;
	float counter_bad = 0;
	float counter_total = 0;
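	// Note: counter_bad is never incremented in this version, so the weight
	// effectively reduces to 10 + counter_good / counter_total.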

	float d1[dst->validation_points.size()];

	int nr_valid = 0;

	src_fi->getDiff(transformationMat_inv , dst->validation_points , d1, nr_valid );
	for(int i = 0; i < nr_valid; i++){
		float diff = d1[i];
		if(fabs(diff) < limit)		{counter_good++;}
		counter_total++;
	}

	float d2[src->validation_points.size()];
	nr_valid = 0;
	dst_fi->getDiff(transformationMat , src->validation_points , d2, nr_valid );
	for(int i = 0; i < nr_valid; i++){
		float diff = d2[i];
		if(fabs(diff) < limit)		{counter_good++;}
		counter_total++;
	}

	if(counter_total < min_points){counter_total = min_points;}
	transformation->weight = 10 + (counter_good-bad_weight*counter_bad)/counter_total;

	//printf("input w: %f update weight results: %f %f %f-> %f\n",input->weight,counter_good,counter_bad,counter_total,transformation->weight);

	gettimeofday(&end, NULL);
	float time = (end.tv_sec*1000000+end.tv_usec-(start.tv_sec*1000000+start.tv_usec))/1000000.0f;
	//printf("UpdateWeightFilterv3 cost: %f\n",time);
	
	return transformation;
}
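
The scoring above reduces to a small pure function. A restatement of the arithmetic in filterTransformation(), with bad_weight and min_points standing in for the class parameters used there:

// Weight from the counts of validation points whose depth difference is within
// 'limit'; clamping the total avoids over-trusting very small samples.
float computeWeight(float counter_good, float counter_bad, float counter_total,
                    float bad_weight, float min_points)
{
    if (counter_total < min_points) counter_total = min_points;
    return 10.0f + (counter_good - bad_weight * counter_bad) / counter_total;
}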