Example #1
// Finds the 3D position of a point (in reference frame B) given the inputs
TooN::Vector<3> MapMakerServerBase::ReprojectPoint(TooN::SE3<> se3AfromB, const TooN::Vector<3>& v3A,
                                                     const TooN::Vector<3>& v3B)
{
  // Algorithm from Hartley & Zisserman's Multiple View Geometry book section 12.2

  // PDash = [R|t] is camera A's 3x4 projection matrix in frame B coordinates
  TooN::Matrix<3, 4> PDash;
  PDash.slice<0, 0, 3, 3>() = se3AfromB.get_rotation().get_matrix();
  PDash.slice<0, 3, 3, 1>() = se3AfromB.get_translation().as_col();

  // Stack the four linear constraints: rows 0-1 come from camera B
  // (projection [I|0]), rows 2-3 from camera A (projection PDash)
  TooN::Matrix<4> A;
  A[0][0] = -v3B[2];
  A[0][1] = 0.0;
  A[0][2] = v3B[0];
  A[0][3] = 0.0;
  A[1][0] = 0.0;
  A[1][1] = -v3B[2];
  A[1][2] = v3B[1];
  A[1][3] = 0.0;
  A[2] = v3A[0] * PDash[2] - v3A[2] * PDash[0];
  A[3] = v3A[1] * PDash[2] - v3A[2] * PDash[1];

  // The solution is the right null-space vector of A, i.e. the right singular
  // vector with the smallest singular value (last row of V^T)
  TooN::SVD<4, 4> svd(A);
  TooN::Vector<4> v4Smallest = svd.get_VT()[3];
  if (v4Smallest[3] == 0.0)  // guard against a point at infinity
    v4Smallest[3] = 0.00001;

  return project(v4Smallest);  // dehomogenize to a 3-vector
}
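
A minimal usage sketch (the mapMaker instance and all values are illustrative): ReprojectPoint implements the homogeneous DLT triangulation of Hartley & Zisserman section 12.2, so v3A and v3B should be unprojected (z = 1) image coordinates of the same feature, and se3AfromB the transform taking frame-B coordinates into frame A.

TooN::SE3<> se3AfromB;  // e.g. recovered from an essential-matrix decomposition
TooN::Vector<3> v3A = TooN::makeVector(0.12, -0.03, 1.0);  // ray in camera A
TooN::Vector<3> v3B = TooN::makeVector(0.10, -0.02, 1.0);  // ray in camera B
TooN::Vector<3> v3PosInB = mapMaker.ReprojectPoint(se3AfromB, v3A, v3B);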
Example #2
void MapView::plotCam(TooN::SE3<> droneToGlobal, bool xyCross, float thick, float len, float alpha)
{
	glEnable(GL_DEPTH_TEST);

	// draw cam
	glMatrixMode(GL_MODELVIEW);  
	glLoadIdentity();
	glScaled(0.1,0.1,0.1);
	CVD::glMultMatrix(mse3ViewerFromWorld * droneToGlobal);
	SetupFrustum();


	glLineWidth(thick*lineWidthFactor);

	if(alpha < 1)
	{
		glEnable(GL_BLEND); 
		glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
	}
	else
		glDisable(GL_BLEND); 


	glBegin(GL_LINES);
	// x axis (and the negative y direction) in red
	glColor4f(1,0,0,alpha);
	glVertex3f(-len, 0.0f, 0.0f);
	glVertex3f(len, 0.0f, 0.0f);
	glVertex3f(0.0f, 0.0f, 0.0f);
	glVertex3f(0.0f, -len, 0.0f);

	// positive y axis in green
	glColor4f(0,1,0,alpha);
	glVertex3f(0.0f, 0.0f, 0.0f);
	glVertex3f(0.0f, len, 0.0f);

	// z axis in white
	glColor4f(1,1,1,alpha);
	glVertex3f(0.0f, 0.0f, 0.0f);
	glVertex3f(0.0f, 0.0f, len);
	glEnd();

	
	if(xyCross)
	{
		glLineWidth(1*lineWidthFactor);
		SetupModelView();
		// mark the camera's ground-plane (xy) position with a small cross
		TooN::Vector<2> v2CamPosXY = droneToGlobal.get_translation().slice<0,2>();
		glBegin(GL_LINES);
		glColor4f(1,1,1, alpha);
		glVertex2d(v2CamPosXY[0] - 0.04, v2CamPosXY[1] + 0.04);
		glVertex2d(v2CamPosXY[0] + 0.04, v2CamPosXY[1] - 0.04);
		glVertex2d(v2CamPosXY[0] - 0.04, v2CamPosXY[1] - 0.04);
		glVertex2d(v2CamPosXY[0] + 0.04, v2CamPosXY[1] + 0.04);
		glEnd();
	}
	glDisable(GL_BLEND); 
	glDisable(GL_DEPTH_TEST);
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
}
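
A hypothetical call, assuming mapView is a MapView instance and droneToGlobal comes from the state estimator; the thickness, axis length and alpha values are arbitrary:

TooN::SE3<> droneToGlobal;  // estimated drone pose (identity here for illustration)
mapView.plotCam(droneToGlobal, true /*xyCross*/, 2.0f /*thick*/, 0.5f /*len*/, 0.8f /*alpha*/);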
Example #3
// Draw the reference grid to give the user an idea of whether tracking is OK or not.
void PTAMWrapper::renderGrid(TooN::SE3<> camFromWorld)
{
	myGLWindow->SetupViewport();
	myGLWindow->SetupVideoOrtho();
	myGLWindow->SetupVideoRasterPosAndZoom();


	// (In PTAM the colour of the ref grid showed whether the coarse tracking
	// stage was used; here the grid is drawn in fixed semi-transparent black.)
	glColor4f(0,0,0,0.6);
  
	// The grid is projected manually, i.e. GL receives projected 2D coords to draw.
	int nHalfCells = 5;
	int nTot = nHalfCells * 2 + 1;
	CVD::Image<Vector<2> >  imVertices(CVD::ImageRef(nTot,nTot));
	for(int i=0; i<nTot; i++)
		for(int j=0; j<nTot; j++)
		{
			Vector<3> v3;
			v3[0] = (i - nHalfCells) * 1;
			v3[1] = (j - nHalfCells) * 1;
			v3[2] = 0.0;
			Vector<3> v3Cam = camFromWorld * v3;
			// Points on or behind the camera plane would project wrongly; push
			// them far off to the side instead so the grid lines stay usable
			if(v3Cam[2] < 0.001)
				v3Cam = TooN::makeVector(100000*v3Cam[0],100000*v3Cam[1],0.0001);

			imVertices[i][j] = mpCamera->Project(TooN::project(v3Cam))*0.5;
		}

	glEnable(GL_LINE_SMOOTH);
	glEnable(GL_BLEND);
	glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
	glLineWidth(2);
	for(int i=0; i<nTot; i++)
	{
		glBegin(GL_LINE_STRIP);
		for(int j=0; j<nTot; j++)
			CVD::glVertex(imVertices[i][j]);
		glEnd();

		glBegin(GL_LINE_STRIP);
		for(int j=0; j<nTot; j++)
			CVD::glVertex(imVertices[j][i]);
		glEnd();
	}
  
	glLineWidth(1);
	glColor3f(1,0,0);
}
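
A hypothetical call site, assuming ptamWrapper is the PTAMWrapper instance and the GL window is already current:

TooN::SE3<> camFromWorld;  // current tracker pose (identity here for illustration)
ptamWrapper.renderGrid(camFromWorld);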
Example #4
void EstimationNode::publishTf(TooN::SE3<> trans, ros::Time stamp, int seq, std::string system)
{
	trans = trans.inverse();	// we publish the pose of 'system' in the map frame

	// Copy the rotation and translation into tf types
	const TooN::Matrix<3> rot = trans.get_rotation().get_matrix();
	tf::Matrix3x3 m;
	m[0][0] = rot(0,0); m[0][1] = rot(0,1); m[0][2] = rot(0,2);
	m[1][0] = rot(1,0); m[1][1] = rot(1,1); m[1][2] = rot(1,2);
	m[2][0] = rot(2,0); m[2][1] = rot(2,1); m[2][2] = rot(2,2);

	tf::Vector3 v;
	v[0] = trans.get_translation()[0];
	v[1] = trans.get_translation()[1];
	v[2] = trans.get_translation()[2];


	tf::Transform tr = tf::Transform(m,v);
	tf::StampedTransform t = tf::StampedTransform(tr,stamp,"map",system);
	tf_broadcaster.sendTransform(t);

	if(logfilePTAMRaw != NULL)
	{
		pthread_mutex_lock(&(logPTAMRaw_CS));
		// log:
		// - filterPosePrePTAM estimated for videoFrameTimestamp-delayVideo.
		// - PTAMResulttransformed estimated for videoFrameTimestamp-delayVideo. (using imu only for last step)
		// - predictedPoseSpeed estimated for lastNfoTimestamp+filter->delayControl	(actually predicting)
		// - predictedPoseSpeedATLASTNFO estimated for lastNfoTimestamp	(using imu only)
		(*logfilePTAMRaw) << seq << " " << stamp << " " << tr.getOrigin().x() << " " << tr.getOrigin().y() << " " << tr.getOrigin().z() << " "
			<< tr.getRotation().x() << " " << tr.getRotation().y() << " " << tr.getRotation().z() << " " << tr.getRotation().w() << std::endl;
		pthread_mutex_unlock(&(logPTAMRaw_CS));
	}
}
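
The element-by-element copy above can also be written with tf::Matrix3x3::setValue; a sketch of the equivalent conversion, using the same accessors:

const TooN::Matrix<3> r = trans.get_rotation().get_matrix();
tf::Matrix3x3 m;
m.setValue(r(0,0), r(0,1), r(0,2),
           r(1,0), r(1,1), r(1,2),
           r(2,0), r(2,1), r(2,2));
tf::Vector3 v(trans.get_translation()[0],
              trans.get_translation()[1],
              trans.get_translation()[2]);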
Example #5
// Rotates/translates the whole map and all keyFrames
void MapMakerServerBase::ApplyGlobalTransformationToMap(TooN::SE3<> se3NewFromOld)
{
  for (MultiKeyFramePtrList::iterator it = mMap.mlpMultiKeyFrames.begin(); it != mMap.mlpMultiKeyFrames.end(); ++it)
  {
    MultiKeyFrame& mkf = *(*it);
    // World coordinates change by se3NewFromOld, so BaseFromWorld absorbs its inverse
    mkf.mse3BaseFromWorld = mkf.mse3BaseFromWorld * se3NewFromOld.inverse();
    for (KeyFramePtrMap::iterator jiter = mkf.mmpKeyFrames.begin(); jiter != mkf.mmpKeyFrames.end(); ++jiter)
    {
      KeyFrame& kf = *(jiter->second);
      kf.mse3CamFromWorld = kf.mse3CamFromBase * mkf.mse3BaseFromWorld; 
    }
  }

  for (MapPointPtrList::iterator it = mMap.mlpPoints.begin(); it != mMap.mlpPoints.end(); ++it)
  {
    MapPoint& point = *(*it);
    point.mv3WorldPos = se3NewFromOld * point.mv3WorldPos;
    point.RefreshPixelVectors();
  }
}
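
A sketch of a typical caller (the mapMaker instance and the transform are illustrative), e.g. shifting the world origin by one metre along z after a ground-plane alignment:

TooN::SE3<> se3NewFromOld;
se3NewFromOld.get_translation() = TooN::makeVector(0.0, 0.0, -1.0);
mapMaker.ApplyGlobalTransformationToMap(se3NewFromOld);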
Example #6
void AbstractView::drawPose3D(const TooN::SE3<double> & pose)
{
  glPushMatrix();
  Vector<3> center = RobotVision::trans2center(pose);

  glTranslate(center);
  // Convert the rotation to axis-angle form for glRotatef (which expects degrees)
  Vector<3> aa = pose.inverse().get_rotation().ln();
  double angle = norm(aa);
  if ( angle != 0.0 ) {
    glRotatef( angle * 180.0 / M_PI, aa[0], aa[1], aa[2]);
  }
  // the three camera axes, each 0.05 long
  drawLine3D(TooN::makeVector(0,0,0), TooN::makeVector(0.05, 0, 0));
  drawLine3D(TooN::makeVector(0,0,0), TooN::makeVector(0, 0.05, 0));
  drawLine3D(TooN::makeVector(0,0,0), TooN::makeVector(0, 0, 0.05));
  // a 0.05-side square outline in the camera's xy plane
  drawLine3D(TooN::makeVector(0.025,0.025,0),
             TooN::makeVector(-0.025, 0.025, 0));
  drawLine3D(TooN::makeVector(-0.025,-0.025,0),
             TooN::makeVector(-0.025, 0.025, 0));
  drawLine3D(TooN::makeVector(-0.025,-0.025,0),
             TooN::makeVector(0.025, -0.025, 0));
  drawLine3D(TooN::makeVector(0.025,0.025,0),
             TooN::makeVector(0.025, -0.025, 0));
  glPopMatrix();
}
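
A hedged usage sketch, assuming view refers to a concrete AbstractView implementation: draw one pose marker at the origin and another offset along x:

TooN::SE3<double> pose;
view.drawPose3D(pose);  // axes + square at the origin
pose.get_translation() = TooN::makeVector(0.5, 0.0, 0.0);
view.drawPose3D(pose);  // same marker half a metre along x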
Example #7
	// Store the pose and cache its inverse so both directions are cheap to query
	void PosedCamera::SetPose(const toon::SE3<>& pose) {
		pose_ = pose;
		invpose_ = pose.inverse();
	}
Example #8
// Keep only the inverse: T_cw_ maps world coordinates into the camera frame
void AbstractView::setPose(const TooN::SE3<>& T_wc)
{
  T_cw_ = T_wc.inverse();
}
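
Examples #7 and #8 are two sides of the same pattern: invert the pose once when it is set, so per-frame queries never pay for an inversion. A minimal sketch of the combined idea (class and member names hypothetical):

class PoseHolder {
public:
  void SetPose(const TooN::SE3<>& T_wc) {
    T_wc_ = T_wc;                // camera-to-world, as given
    T_cw_ = T_wc.inverse();      // cached world-to-camera
  }
private:
  TooN::SE3<> T_wc_, T_cw_;
};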
Example #9
// Publish the current tracker pose
void SystemFrontendBase::PublishPose()
{
  ROS_ASSERT(mpTracker);

  if (mpTracker->GetTrackingQuality() == Tracker::NONE || mpTracker->IsLost())
    return;

  geometry_msgs::PoseWithCovarianceStamped pose_cov_msg;
  pose_cov_msg.header.stamp = mpTracker->GetCurrentTimestamp();
  pose_cov_msg.header.frame_id = "vision_world";

  TooN::SE3<> pose = mpTracker->GetCurrentPose().inverse();
  TooN::Matrix<3> rot = pose.get_rotation().get_matrix();
  TooN::Matrix<6> cov = mpTracker->GetCurrentCovariance();

  // clear cross correlation
  cov.slice<0, 3, 3, 3>() = TooN::Zeros;
  cov.slice<3, 0, 3, 3>() = TooN::Zeros;

  // Change covariance matrix frame from camera to world
  cov.slice<0, 0, 3, 3>() = rot * cov.slice<0, 0, 3, 3>() * rot.T();
  cov.slice<3, 3, 3, 3>() = rot * cov.slice<3, 3, 3, 3>() * rot.T();

  // Inflate the covariance according to tracking quality (hacky heuristics)
  if (mpTracker->GetTrackingQuality() == Tracker::GOOD)
  {
    cov = cov * 1e2;
  }
  else if (mpTracker->GetTrackingQuality() == Tracker::DODGY)
  {
    cov = cov * 1e5;
  }
  else if (mpTracker->GetTrackingQuality() == Tracker::BAD)
  {
    cov = cov * 1e8;
  }

  pose_cov_msg.pose.pose = util::SE3ToPoseMsg(pose);

  int numElems = 6;
  for (int i = 0; i < numElems; ++i)
  {
    for (int j = 0; j < numElems; ++j)
    {
      pose_cov_msg.pose.covariance[i * numElems + j] = cov(i, j);
    }
  }

  mPoseWithCovPub.publish(pose_cov_msg);

  static tf::TransformBroadcaster br;
  tf::Transform transform;
  tf::poseMsgToTF(pose_cov_msg.pose.pose, transform);
  br.sendTransform(tf::StampedTransform(transform, mpTracker->GetCurrentTimestamp(), "vision_world", "vision_pose"));

  SE3Map mPoses = mpTracker->GetCurrentCameraPoses();

  geometry_msgs::PoseArray pose_array_msg;
  pose_array_msg.header.stamp = pose_cov_msg.header.stamp;
  pose_array_msg.header.frame_id = pose_cov_msg.header.frame_id;
  pose_array_msg.poses.resize(1 + mPoses.size());
  pose_array_msg.poses[0] = pose_cov_msg.pose.pose;

  int i = 1;
  for (SE3Map::iterator it = mPoses.begin(); it != mPoses.end(); ++it, ++i)
  {
    pose_array_msg.poses[i] = util::SE3ToPoseMsg(it->second.inverse());
    tf::poseMsgToTF(pose_array_msg.poses[i], transform);
    br.sendTransform(tf::StampedTransform(transform, mpTracker->GetCurrentTimestamp(), "vision_world", it->first));
  }

  mPoseArrayPub.publish(pose_array_msg);
}
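
On the receiving side, geometry_msgs stores the 6x6 covariance as a flat, row-major 36-element array in the order (x, y, z, rot_x, rot_y, rot_z), mirroring the fill loop above; for example:

// position variances sit on the diagonal at flat indices 0, 7 and 14
double varX = pose_cov_msg.pose.covariance[0 * 6 + 0];
double varY = pose_cov_msg.pose.covariance[1 * 6 + 1];
double varZ = pose_cov_msg.pose.covariance[2 * 6 + 2];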