Code Example #1
void CoSegmentation::point2point(Matrix3X & srCloud, Matrix3X & tgCloud, Matrix33 & rotMat, MatrixXX & transVec)
{
	Vector3Type X_mean, Y_mean;

	for(int i=0; i<3; ++i) // compute the centroid of each point cloud
	{
		X_mean(i) = (ScalarType)srCloud.row(i).sum()/srCloud.cols();
		Y_mean(i) = (ScalarType)tgCloud.row(i).sum()/tgCloud.cols();
	}

	srCloud.colwise() -= X_mean;
	tgCloud.colwise() -= Y_mean;

	/// Compute transformation
	Eigen::Affine3f transformation;
	Eigen::Matrix3f sigma = (srCloud * tgCloud.transpose()).cast<float>();
	Eigen::JacobiSVD<Eigen::Matrix3f> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
	if(svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0)//contains reflection
	{
		Vector3Type S = Vector3Type::Ones(); S(2) = -1.0;
		transformation.linear().noalias() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
	} else 
	{
		transformation.linear().noalias() = svd.matrixV()*svd.matrixU().transpose(); // compute the rotation matrix
	}

	transVec = Y_mean - (transformation.linear().cast<ScalarType>() * X_mean);
	rotMat = transformation.linear().cast<ScalarType>() ;

	srCloud.colwise() += X_mean;
	tgCloud.colwise() += Y_mean;

}
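
For reference, Example #1 is the closed-form point-to-point alignment step used in ICP (the Kabsch/Umeyama method). With centroids \bar{x}, \bar{y} and the centered clouds, it computes

    H = \sum_i (x_i - \bar{x})(y_i - \bar{y})^{\top} = U \Sigma V^{\top} \ \text{(SVD)}, \qquad R = V \, \mathrm{diag}\big(1,\, 1,\, \det(VU^{\top})\big) \, U^{\top}, \qquad t = \bar{y} - R\bar{x}

The det(U) * det(V) < 0 test in the code is equivalent to det(VU^T) < 0; without that sign flip, the SVD solution can return a reflection instead of a proper rotation.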
Code Example #2
File: kinfu_viz_tools.cpp Project: adamwtow/JAMESapc
    Eigen::Affine3f getViewerPose (visualization::PCLVisualizer& viewer)
    {
      Eigen::Affine3f pose = viewer.getViewerPose();
      Eigen::Matrix3f rotation = pose.linear();

      // Fixed axis permutation: the reordered rotation's columns become
      // (-y, -z, x) of the viewer's original camera axes.
      Eigen::Matrix3f axis_reorder;
      axis_reorder << 0,  0,  1,
                     -1,  0,  0,
                      0, -1,  0;

      rotation = rotation * axis_reorder;
      pose.linear() = rotation;
      return pose;
    }
Code Example #3
File: screenshot_manager.cpp Project: Bardo91/pcl
     void
     ScreenshotManager::saveImage(const Eigen::Affine3f &camPose, PtrStepSz<const PixelRGB> rgb24)
     {

       PCL_INFO ("[o] [o] [o] [o] Saving screenshot [o] [o] [o] [o]\n");

       std::string file_extension_image = ".png";
       std::string file_extension_pose = ".txt";
       std::string filename_image = "KinFuSnapshots/";
       std::string filename_pose = "KinFuSnapshots/";

       // Get Pose
       Eigen::Matrix<float, 3, 3, Eigen::RowMajor> erreMats = camPose.linear ();
       Eigen::Vector3f teVecs = camPose.translation ();

       // Create filenames
       filename_pose = filename_pose + boost::lexical_cast<std::string> (screenshot_counter) + file_extension_pose;
       filename_image = filename_image + boost::lexical_cast<std::string> (screenshot_counter) + file_extension_image;

       // Write files
       writePose (filename_pose, teVecs, erreMats);

       // Save Image
       pcl::io::saveRgbPNGFile (filename_image, (unsigned char*)rgb24.data, 640, 480);
        
       screenshot_counter++;
     }
Code Example #4
File: commandsubscriber.cpp Project: caomw/ros_kinfu
bool CommandSubscriber::getForcedTfFrame(Eigen::Affine3f & result) const
  {
  tf::StampedTransform transform;
  try
    {
    m_tf_listener.lookupTransform(m_forced_tf_frame_reference,m_forced_tf_frame_name,ros::Time(0),transform);
    }
    catch (const tf::TransformException & ex) // catch by const reference, not by value
      {
      ROS_ERROR("kinfu: hint was forced by TF, but retrieval failed because: %s",ex.what());
      return false;
      }

  Eigen::Vector3f vec;
  vec.x() = transform.getOrigin().x();
  vec.y() = transform.getOrigin().y();
  vec.z() = transform.getOrigin().z();
  Eigen::Quaternionf quat;
  quat.x() = transform.getRotation().x();
  quat.y() = transform.getRotation().y();
  quat.z() = transform.getRotation().z();
  quat.w() = transform.getRotation().w();

  result.linear() = quat.toRotationMatrix();
  result.translation() = vec;
  return true;
  }
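
For reference, the field-by-field copy above can also be done with the tf_conversions helpers, assuming that package is available; a minimal sketch:

#include <tf_conversions/tf_eigen.h>

Eigen::Affine3d tmp;
tf::transformTFToEigen (transform, tmp);       // tf::Transform -> Eigen::Affine3d
Eigen::Affine3f result = tmp.cast<float> ();   // KinFu works in single precision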
Code Example #5
File: kinfu.cpp Project: neeljp/pcl
Eigen::Affine3f
pcl::gpu::kinfuLS::KinfuTracker::getLastEstimatedPose () const
{
  Eigen::Affine3f aff;
  aff.linear () = last_estimated_rotation_;
  aff.translation () = last_estimated_translation_;
  return (aff);
}
Code Example #6
void FakeKinect::setWorldFromCam(const Eigen::Affine3f &worldFromCam) {
  Eigen::Vector3f eye, center, up;
  Eigen::Matrix3f rot;
  rot = worldFromCam.linear();
  eye = worldFromCam.translation();
  center = rot.col(2); // the camera's +z axis (view direction) in world coordinates
  up = -rot.col(1);    // camera +y points down in the image, so world "up" is -y
  m_cam->setViewMatrixAsLookAt(toOSGVector(eye), toOSGVector(center+eye), toOSGVector(up));
}
Code Example #7
bool applyTransform(std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &points, std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &normals, const Eigen::Affine3f &t)
{
    const Eigen::Matrix3f normalMatrix = t.linear().inverse().transpose();
    for (size_t i = 0; i < points.size(); ++i) {
        points[i] = t * points[i];
        normals[i] = (normalMatrix * normals[i]).normalized();
    }

    return true;
}
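
Example #7 applies the standard rule for transforming surface normals: if points map as p' = Mp + t, normals must be mapped by the inverse transpose of the linear part so they stay perpendicular to the surface when M contains scale or shear,

    n' = \frac{(M^{-1})^{\top} n}{\lVert (M^{-1})^{\top} n \rVert}

For a pure rotation M, (M^{-1})^{\top} = M, so the rule reduces to simply rotating the normal.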
Code Example #8
File: kinfu.cpp Project: BITVoyager/pcl
Eigen::Affine3f
pcl::gpu::KinfuTracker::getCameraPose (int time) const
{
  if (time >= (int)rmats_.size () || time < 0) // >= guards time == size (), which would index past the last stored pose
    time = rmats_.size () - 1;

  Eigen::Affine3f aff;
  aff.linear () = rmats_[time];
  aff.translation () = tvecs_[time];
  return (aff);
}
Code Example #9
inline void
RGBID_SLAM::CameraView::drawMotionUncertainty(const Eigen::Matrix<float,6,6> & cov_mat, const Eigen::Affine3f& pose, double r, double g, double b)
{
  //boost::mutex::scoped_lock lock(display_mutex_);
  
  Eigen::Matrix3f A = cov_mat.block<3,3>(0,0);
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(A,Eigen::ComputeThinU);
  Eigen::Vector3f singval_A = svd.singularValues();
  Eigen::Matrix3f singvec_A = svd.matrixU();
  Eigen::Matrix3f singval_A_mat = Eigen::Matrix3f::Identity();
  singval_A_mat(0,0) = singval_A(0);
  singval_A_mat(1,1) = singval_A(1);
  singval_A_mat(2,2) = singval_A(2);
  Eigen::Affine3f trafo = Eigen::Affine3f::Identity(); // initialize fully: transformPoint below also reads the translation part
  Eigen::Matrix3f trafomat = 10000.f*singvec_A*singval_A_mat.cwiseSqrt();
  trafo.linear() = trafomat;
  //std::cout << trafo.linear() << std::endl;
  //std::cout << trafo.translation() << std::endl;
  pcl::PointXYZ p0x, p1x, p0y, p1y, p0z, p1z;
  p0x.x = -1.; p0x.y = 0.;  p0x.z = 0.;
  p1x.x = 1.;  p1x.y = 0.;  p1x.z = 0.;
  p0y.x = 0.;  p0y.y = -1.; p0y.z = 0.;
  p1y.x = 0.;  p1y.y = 1.;  p1y.z = 0.;
  p0z.x = 0.;  p0z.y = 0.;  p0z.z = -1.;
  p1z.x = 0.;  p1z.y = 0.;  p1z.z = 1.;
  
  p0x = pcl::transformPoint (p0x, trafo);
  p1x = pcl::transformPoint (p1x, trafo);
  p0y = pcl::transformPoint (p0y, trafo);
  p1y = pcl::transformPoint (p1y, trafo);
  p0z = pcl::transformPoint (p0z, trafo);
  p1z = pcl::transformPoint (p1z, trafo);
  
  p0x = pcl::transformPoint (p0x, pose);
  p1x = pcl::transformPoint (p1x, pose);
  p0y = pcl::transformPoint (p0y, pose);
  p1y = pcl::transformPoint (p1y, pose);
  p0z = pcl::transformPoint (p0z, pose);
  p1z = pcl::transformPoint (p1z, pose);
  
  
  
  std::stringstream ss;
  ss.str ("");
  ss << name_ << "_uncline1";
  cloud_viewer_.addLine (p0x, p1x, r, g, b, ss.str ());
  ss.str ("");
  ss << name_ << "_uncline2";
  cloud_viewer_.addLine (p0y, p1y, r, g, b, ss.str ());
  ss.str ("");
  ss << name_ << "_uncline3";
  cloud_viewer_.addLine (p0z, p1z, r, g, b, ss.str ());

}
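
The linear map built above is the usual construction for drawing a covariance ellipsoid: a symmetric covariance block A has SVD A = U \Sigma U^{\top} (which doubles as its eigendecomposition), and the map x \mapsto U \Sigma^{1/2} x sends the unit sphere to the 1-sigma ellipsoid, whose semi-axis lengths are \sqrt{\sigma_i} along the columns of U. The factor 10000 is only a display scale for the viewer.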
Code Example #10
	void CGLUtil::viewerGL()
	{
		glMatrixMode(GL_MODELVIEW);
		// load the matrix to set camera pose
		glLoadMatrixf(_eimModelViewGL.data());
		
		//rotation
		Eigen::Matrix3f eimRotation;
		if( btl::utility::BTL_GL == _eConvention ){
			eimRotation = Eigen::AngleAxisf(float(_dXAngle*M_PI/180.f), Eigen::Vector3f::UnitY())* Eigen::AngleAxisf(float(_dYAngle*M_PI/180.f), Eigen::Vector3f::UnitX());                         // 3. rotate horizontally
		}//mouse x-movement is the rotation around y-axis
		else if( btl::utility::BTL_CV == _eConvention )	{
			eimRotation = Eigen::AngleAxisf(float(_dXAngle*M_PI/180.f), -Eigen::Vector3f::UnitY())* Eigen::AngleAxisf(float(_dYAngle*M_PI/180.f), Eigen::Vector3f::UnitX());                         // 3. rotate horizontally
		}
		//translation
		/*_dZoom = _dZoom < 0.1? 0.1: _dZoom;
		_dZoom = _dZoom > 10? 10: _dZoom;*/

		//get direction N pointing from camera center to the object centroid
		Eigen::Affine3f M; M = _eimModelViewGL;
		Eigen::Vector3f T = M.translation();
		Eigen::Matrix3f R = M.linear();
		Eigen::Vector3f C = -R.transpose()*T;//camera centre
		Eigen::Vector3f N = _eivCentroid - C;//from camera centre to object centroid
		N = N/N.norm();//normalization

		Eigen::Affine3f _eimManipulate; _eimManipulate.setIdentity();
		_eimManipulate.translate( N*float(_dZoom) );//(N*(1-_dZoom));  //use camera movement toward object for zoom in/out effects
		_eimManipulate.translate(_eivCentroid);  // 5. translate back to the original camera pose
		//_eimManipulate.scale(s);				 // 4. zoom in/out, never use scale to simulate camera movement, it affects z-buffer capturing. use translation instead
		_eimManipulate.rotate(eimRotation);		 // 2. rotate vertically // 3. rotate horizontally
		_eimManipulate.translate(-_eivCentroid); // 1. translate the camera center to align with object centroid
		glMultMatrixf(_eimManipulate.data());

		/*	
		glTranslated( _aCentroid[0], _aCentroid[1], _aCentroid[2] ); // 5. translate back to the original camera pose
		_dZoom = _dZoom < 0.1? 0.1: _dZoom;
		_dZoom = _dZoom > 10? 10: _dZoom;
		glScaled( _dZoom, _dZoom, _dZoom );                      //  4. zoom in/out, 
		if( btl::utility::BTL_GL == _eConvention )
		glRotated ( _dXAngle, 0, 1 ,0 );                         // 3. rotate horizontally
		else if( btl::utility::BTL_CV == _eConvention )			//mouse x-movement is the rotation around y-axis
		glRotated ( _dXAngle, 0,-1 ,0 ); 
		glRotated ( _dYAngle, 1, 0 ,0 );                             // 2. rotate vertically
		glTranslated(-_aCentroid[0],-_aCentroid[1],-_aCentroid[2] ); // 1. translate the camera center to align with object centroid
		*/

		// light position in 3d
		glLightfv(GL_LIGHT0, GL_POSITION, _aLight);
	}
Code Example #11
template <typename T1, typename T2>
Eigen::Affine3f WorldDownloadManager::toEigenAffine(T1 linear, T2 translation)
{
  Eigen::Matrix3f l;
  Eigen::Vector3f t;

  for (uint r = 0; r < 3; r++)
    for (uint c = 0; c < 3; c++)
      l(r,c) = linear[r * 3 + c]; // the source array is row-major

  for (uint r = 0; r < 3; r++)
    t[r] = translation[r];

  Eigen::Affine3f result;
  result.linear() = l;
  result.translation() = t;
  return result;
}
Code Example #12
geometry_msgs::Pose transformEigenAffine3fToPose(const Eigen::Affine3f &e) { // const ref: fixed-size Eigen types should not be passed by value (alignment)
    Eigen::Vector3f Oe;
    Eigen::Matrix3f Re;
    geometry_msgs::Pose pose;
    Oe = e.translation();
    Re = e.linear();

    Eigen::Quaternionf q(Re); // convert rotation matrix Re to a quaternion, q
    pose.position.x = Oe(0);
    pose.position.y = Oe(1);
    pose.position.z = Oe(2);

    pose.orientation.x = q.x();
    pose.orientation.y = q.y();
    pose.orientation.z = q.z();
    pose.orientation.w = q.w();

    return pose;
}
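
The inverse conversion follows the same pattern; a minimal sketch (the function name transformPoseToEigenAffine3f is illustrative, not from the original source):

Eigen::Affine3f transformPoseToEigenAffine3f(const geometry_msgs::Pose &pose) {
    Eigen::Affine3f e = Eigen::Affine3f::Identity();
    e.translation() << pose.position.x, pose.position.y, pose.position.z;
    Eigen::Quaternionf q(pose.orientation.w, pose.orientation.x,
                         pose.orientation.y, pose.orientation.z); // Eigen's ctor takes (w, x, y, z)
    e.linear() = q.toRotationMatrix();
    return e;
}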
Code Example #13
File: AmPclUtil.cpp Project: Hirucon/KinfuSuperRes
            void
            printPose( Eigen::Affine3f const& pose )
            {
                // debug
                std::cout << pose.linear() << std::endl <<
                             pose.translation().transpose() << std::endl;

                float alpha = atan2(  pose.linear()(1,0), pose.linear()(0,0) );
                float beta  = atan2( -pose.linear()(2,0),
                                     sqrt( pose.linear()(2,1) * pose.linear()(2,1) +
                                           pose.linear()(2,2) * pose.linear()(2,2)  )
                                     );
                float gamma = atan2(  pose.linear()(2,1), pose.linear()(2,2) );

                std::cout << "alpha: " << alpha << " " << alpha * 180.f / M_PI << std::endl;
                std::cout << "beta: "  << beta  << " " << beta  * 180.f / M_PI << std::endl;
                std::cout << "gamma: " << gamma << " " << gamma * 180.f / M_PI << std::endl;
            }
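
Example #13 recovers ZYX (yaw-pitch-roll) Euler angles, i.e. the decomposition R = R_z(\alpha) R_y(\beta) R_x(\gamma) with (1-based) entries r_{ij}:

    \alpha = \operatorname{atan2}(r_{21}, r_{11}), \qquad \beta = \operatorname{atan2}\big(-r_{31}, \sqrt{r_{32}^2 + r_{33}^2}\big), \qquad \gamma = \operatorname{atan2}(r_{32}, r_{33})

Note the extraction degenerates as \beta approaches \pm\pi/2 (gimbal lock), where \alpha and \gamma are no longer individually determined.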
Code Example #14
int main(int argc, char** argv) {
    ros::init(argc, argv, "object_finder_node"); // name this node 

    ROS_INFO("instantiating the object finder action server: ");

    ObjectFinder object_finder_as; // create an instance of the class "ObjectFinder"
    tf::TransformListener tfListener;
    ROS_INFO("listening for kinect-to-base transform:");
    tf::StampedTransform stf_kinect_wrt_base;
    bool tferr = true;
    ROS_INFO("waiting for tf between kinect_pc_frame and base_link...");
    while (tferr) {
        tferr = false;
        try {
            //The direction of the transform returned will be from the target_frame to the source_frame. 
            //Which if applied to data, will transform data in the source_frame into the target_frame. 
            //See tf/CoordinateFrameConventions#Transform_Direction
            tfListener.lookupTransform("base_link", "kinect_pc_frame", ros::Time(0), stf_kinect_wrt_base);
        } catch (tf::TransformException &exception) {
            ROS_WARN("%s; retrying...", exception.what());
            tferr = true;
            ros::Duration(0.5).sleep(); // sleep for half a second
            ros::spinOnce();
        }
    }
    ROS_INFO("kinect to base_link tf is good");
    object_finder_as.xformUtils_.printStampedTf(stf_kinect_wrt_base);
    tf::Transform tf_kinect_wrt_base = object_finder_as.xformUtils_.get_tf_from_stamped_tf(stf_kinect_wrt_base);
    g_affine_kinect_wrt_base = object_finder_as.xformUtils_.transformTFToAffine3f(tf_kinect_wrt_base);
    cout << "affine rotation: " << endl;
    cout << g_affine_kinect_wrt_base.linear() << endl;
    cout << "affine offset: " << g_affine_kinect_wrt_base.translation().transpose() << endl;

    ROS_INFO("going into spin");
    // from here, all the work is done in the action server, with the interesting stuff done within "executeCB()"
    while (ros::ok()) {
        ros::spinOnce(); //normally, can simply do: ros::spin();  
        ros::Duration(0.1).sleep();
    }

    return 0;
}
Code Example #15
	void CGLUtil::getRTFromWorld2CamCV(Eigen::Matrix3f* pRw_, Eigen::Vector3f* pTw_) {
		// only the matrix on top of the modelview matrix stack is read
		Eigen::Affine3f M;
		glGetFloatv(GL_MODELVIEW_MATRIX,M.matrix().data());

		Eigen::Matrix3f S; // only needed by the commented-out scale-extraction path below
		*pTw_ = M.translation();
		*pRw_ = M.linear();
		//M.computeRotationScaling(pRw_,&S);
		//*pTw_ = (*pTw_)/S(0,0);
		// negate rows 1 and 2 (0-based) to switch from the GL to the CV camera convention
		for (int r = 1; r < 3; r++){
			for	(int c = 0; c < 3; c++){
				(*pRw_)(r,c) = -(*pRw_)(r,c);
			}
			(*pTw_)(r) = -(*pTw_)(r); 
		}
		//PRINT(S);
		//PRINT(*pRw_);
		//PRINT(*pTw_);
		return;
	}
Code Example #16
File: world_model.hpp Project: 2php/pcl
void 
pcl::kinfuLS::WorldModel<PointT>::getExistingData(const double previous_origin_x, const double previous_origin_y, const double previous_origin_z, const double offset_x, const double offset_y, const double offset_z, const double volume_x, const double volume_y, const double volume_z, pcl::PointCloud<PointT> &existing_slice)
{
  double newOriginX = previous_origin_x + offset_x; 
  double newOriginY = previous_origin_y + offset_y; 
  double newOriginZ = previous_origin_z + offset_z;
  double newLimitX = newOriginX + volume_x; 
  double newLimitY = newOriginY + volume_y; 
  double newLimitZ = newOriginZ + volume_z;
	
  // filter points in the space of the new cube
  PointCloudPtr newCube (new pcl::PointCloud<PointT>);
  // condition
  ConditionAndPtr range_condAND (new pcl::ConditionAnd<PointT> ());
  range_condAND->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("x", pcl::ComparisonOps::GE, newOriginX)));
  range_condAND->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("x", pcl::ComparisonOps::LT, newLimitX)));
  range_condAND->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("y", pcl::ComparisonOps::GE, newOriginY)));
  range_condAND->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("y", pcl::ComparisonOps::LT, newLimitY)));
  range_condAND->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("z", pcl::ComparisonOps::GE, newOriginZ)));
  range_condAND->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("z", pcl::ComparisonOps::LT, newLimitZ))); 
  
  // build the filter
  pcl::ConditionalRemoval<PointT> condremAND (true);
  condremAND.setCondition (range_condAND);
  condremAND.setInputCloud (world_);
  condremAND.setKeepOrganized (false);
  
  // apply filter
  condremAND.filter (*newCube);
	
  // filter points that belong to the new slice
  ConditionOrPtr range_condOR (new pcl::ConditionOr<PointT> ());
  
  if(offset_x >= 0)
    range_condOR->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("x", pcl::ComparisonOps::GE,  previous_origin_x + volume_x - 1.0 )));
  else
    range_condOR->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("x", pcl::ComparisonOps::LT,  previous_origin_x )));

  if(offset_y >= 0)
    range_condOR->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("y", pcl::ComparisonOps::GE,  previous_origin_y + volume_y - 1.0 )));
  else
    range_condOR->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("y", pcl::ComparisonOps::LT,  previous_origin_y )));

  if(offset_z >= 0)
    range_condOR->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("z", pcl::ComparisonOps::GE,  previous_origin_z + volume_z - 1.0 )));
  else
    range_condOR->addComparison (FieldComparisonConstPtr (new pcl::FieldComparison<PointT> ("z", pcl::ComparisonOps::LT,  previous_origin_z )));
  
  // build the filter
  pcl::ConditionalRemoval<PointT> condrem (true);
  condrem.setCondition (range_condOR);
  condrem.setInputCloud (newCube);
  condrem.setKeepOrganized (false);
  // apply filter
  condrem.filter (existing_slice);  
 
  if(existing_slice.points.size () != 0)
  {
    // transform the slice into the new cube's coordinate frame
    Eigen::Affine3f transformation;
    transformation.translation ()[0] = newOriginX;
    transformation.translation ()[1] = newOriginY;
    transformation.translation ()[2] = newOriginZ;

    transformation.linear ().setIdentity ();

    transformPointCloud (existing_slice, existing_slice, transformation.inverse ());
  }
}
Code Example #17
File: camera.hpp Project: LCG-UFRJ/tucano
 /**
  * @brief Returns the center of the camera in world space.
  * @return Camera center
  */
 Eigen::Vector3f getCenter (void) const
 {
     return view_matrix.linear().inverse() * (-view_matrix.translation());
 }
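
Example #17 is the standard recovery of the camera center from a view matrix: with view = [R | t], the world-space center is

    C = R^{-1}(-t) = -R^{\top} t

where the second equality holds when the linear part is a pure rotation; the code's explicit inverse() also remains correct if the view matrix carries scale.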