Example #1
/* ************************************************************************* */
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1,
                    OptionalJacobian<1, 6> H2) const {
  Matrix13 D_local_point;
  double r = range(pose.translation(), H1, H2 ? &D_local_point : 0);
  if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
  return r;
}
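The H2 block above is just the chain rule: the range depends on the other pose only through its translation, and in GTSAM a tangent-space perturbation of that pose moves its translation through its rotation matrix. As a reading aid (standard manifold calculus, not quoted from the source), with r = ||t2 - t1||:

$$\frac{\partial r}{\partial \xi_2} \;=\; \begin{bmatrix} 0_{1\times 3} & \dfrac{\partial r}{\partial t_2}\, R_2 \end{bmatrix}, \qquad \frac{\partial r}{\partial t_2} \;=\; \frac{(t_2 - t_1)^{\top}}{\lVert t_2 - t_1 \rVert},$$

where the 1x3 factor is the D_local_point returned by the point-valued range overload.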
//***************************************************************************
TEST(GPSData, init) {

  // GPS Reading 1 will be ENU origin
  double t1 = 84831;
  Point3 NED1(0, 0, 0);
  LocalCartesian enu(35.4393283333333, -119.062986666667, 275.54,
      Geocentric::WGS84);

  // GPS Reading 2
  double t2 = 84831.5;
  double E, N, U;
  enu.Forward(35.4394633333333, -119.063146666667, 276.52, E, N, U);
  Point3 NED2(N, E, -U);

  // Estimate initial state
  Pose3 T;
  Vector3 nV;
  boost::tie(T, nV) = GPSFactor::EstimateState(t1, NED1, t2, NED2, 84831.0796);

  // Check values
  EXPECT(assert_equal((Vector)Vector3(29.9575, -29.0564, -1.95993), nV, 1e-4));
  EXPECT(assert_equal(Rot3::ypr(-0.770131, 0.046928, 0), T.rotation(), 1e-5));
  Point3 expectedT(2.38461, -2.31289, -0.156011);
  EXPECT(assert_equal(expectedT, T.translation(), 1e-5));
}
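The expected values in this test are consistent with a straight finite-difference initialization from the two fixes: the velocity is roughly (NED2 - NED1) / (t2 - t1) ≈ (29.96, -29.06, -1.96) m/s, matching nV above. A minimal sketch of that sanity check (a hypothetical helper, not the actual GPSFactor::EstimateState implementation):

// Hypothetical helper: finite-difference velocity between two NED fixes.
// Only a plausibility check for the expected values above, not the algorithm
// used by GPSFactor::EstimateState.
Vector3 approximateVelocity(const Point3& ned1, double t1,
                            const Point3& ned2, double t2) {
  return Vector3(ned2.x() - ned1.x(), ned2.y() - ned1.y(), ned2.z() - ned1.z())
         / (t2 - t1);
}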
void Compound::addGeometry(const Pose3<>& parentPose, Geometry& geometry, SimRobotCore2::CollisionCallback* callback)
{
  // compute pose
  Pose3<> geomPose = parentPose;
  if(geometry.translation)
    geomPose.translate(*geometry.translation);
  if(geometry.rotation)
    geomPose.rotate(*geometry.rotation);

  // create geometry
  dGeomID geom = geometry.createGeometry(Simulation::simulation->staticSpace);
  if(geom)
  {
    dGeomSetData(geom, &geometry);

    // set pose
    dGeomSetPosition(geom, geomPose.translation.x, geomPose.translation.y, geomPose.translation.z);
    dMatrix3 matrix3;
    ODETools::convertMatrix(geomPose.rotation, matrix3);
    dGeomSetRotation(geom, matrix3);
  }

  // handle nested geometries
  for(std::list< ::PhysicalObject*>::const_iterator iter = geometry.physicalDrawings.begin(), end = geometry.physicalDrawings.end(); iter != end; ++iter)
  {
    Geometry* geometry = dynamic_cast<Geometry*>(*iter);
    if(geometry)
      addGeometry(geomPose, *geometry, callback);
  }
}
void Frustum_Filter::init_z_near_z_far_depth
(
  const SfM_Data & sfm_data,
  const double zNear,
  const double zFar
)
{
  // If z_near & z_far are -1 and the structure is not empty,
  //  compute the values for each camera and the structure
  const bool bComputed_Z = (zNear == -1. && zFar == -1.) && !sfm_data.structure.empty();
  if (bComputed_Z)  // Compute the near & far planes from the structure and view observations
  {
    for (Landmarks::const_iterator itL = sfm_data.GetLandmarks().begin();
      itL != sfm_data.GetLandmarks().end(); ++itL)
    {
      const Landmark & landmark = itL->second;
      const Vec3 & X = landmark.X;
      for (Observations::const_iterator iterO = landmark.obs.begin();
        iterO != landmark.obs.end(); ++iterO)
      {
        const IndexT id_view = iterO->first;
        const View * view = sfm_data.GetViews().at(id_view).get();
        if (!sfm_data.IsPoseAndIntrinsicDefined(view))
          continue;

        const Pose3 pose = sfm_data.GetPoseOrDie(view);
        const double z = Depth(pose.rotation(), pose.translation(), X);
        NearFarPlanesT::iterator itZ = z_near_z_far_perView.find(id_view);
        if (itZ != z_near_z_far_perView.end())
        {
          if (z < itZ->second.first)
            itZ->second.first = z;
          else if (z > itZ->second.second)
            itZ->second.second = z;
        }
        else
          z_near_z_far_perView[id_view] = {z,z};
      }
    }
  }
  else
  {
    // Init the same near & far limit for all the valid views
    for (Views::const_iterator it = sfm_data.GetViews().begin();
    it != sfm_data.GetViews().end(); ++it)
    {
      const View * view = it->second.get();
      if (!sfm_data.IsPoseAndIntrinsicDefined(view))
        continue;
      if (z_near_z_far_perView.find(view->id_view) == z_near_z_far_perView.end())
        z_near_z_far_perView[view->id_view] = {zNear, zFar};
    }
  }
}
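The per-view depth used above is the z coordinate of the landmark expressed in the camera frame. A minimal sketch of what Depth is assumed to compute here (signature and body are an assumption about the helper, not copied from it):

// Assumed behaviour of the Depth() helper: third coordinate of X in the
// camera frame defined by rotation R and translation t.
inline double Depth(const Mat3& R, const Vec3& t, const Vec3& X) {
  return (R * X)(2) + t(2);
}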
Example #5
/* ************************************************************************* */
TEST( dataSet, writeBALfromValues_Dubrovnik){

  // Read a file using the unit-tested readBAL
  const string filenameToRead = findExampleDataFile("dubrovnik-3-7-pre");
  SfM_data readData;
  readBAL(filenameToRead, readData);

  Pose3 poseChange = Pose3(Rot3::ypr(-M_PI/10, 0., -M_PI/10), gtsam::Point3(0.3,0.1,0.3));

  Values value;
  for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera
    Key poseKey = symbol('x',i);
    Pose3 pose = poseChange.compose(readData.cameras[i].pose());
    value.insert(poseKey, pose);
  }
  for(size_t j=0; j < readData.number_tracks(); j++){ // for each point
    Key pointKey = P(j);
    Point3 point = poseChange.transform_from( readData.tracks[j].p );
    value.insert(pointKey, point);
  }

  // Write values and readData to a file
  const string filenameToWrite = createRewrittenFileName(filenameToRead);
  writeBALfromValues(filenameToWrite, readData, value);

  // Read the file we wrote
  SfM_data writtenData;
  readBAL(filenameToWrite, writtenData);

  // Check that the reprojection errors are the same and the poses are correct
  // Check the number of cameras, tracks, and per-track measurements
  EXPECT_LONGS_EQUAL(3,writtenData.number_cameras());
  EXPECT_LONGS_EQUAL(7,writtenData.number_tracks());
  const SfM_Track& track0 = writtenData.tracks[0];
  EXPECT_LONGS_EQUAL(3,track0.number_measurements());

  // Check projection of a given point
  EXPECT_LONGS_EQUAL(0,track0.measurements[0].first);
  const SfM_Camera& camera0 = writtenData.cameras[0];
  Point2 expected = camera0.project(track0.p), actual = track0.measurements[0].second;
  EXPECT(assert_equal(expected,actual,12));

  Pose3 expectedPose = camera0.pose();
  Key poseKey = symbol('x',0);
  Pose3 actualPose = value.at<Pose3>(poseKey);
  EXPECT(assert_equal(expectedPose,actualPose, 1e-7));

  Point3 expectedPoint = track0.p;
  Key pointKey = P(0);
  Point3 actualPoint = value.at<Point3>(pointKey);
  EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6));
}
template <typename CALIBRATION>
PinholeCamera<CALIBRATION> perturbCameraPoseAndCalibration(
    const PinholeCamera<CALIBRATION>& camera) {
  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
  Pose3 noise_pose = Pose3(Rot3::ypr(-M_PI / 10, 0., -M_PI / 10),
      Point3(0.5, 0.1, 0.3));
  Pose3 cameraPose = camera.pose();
  Pose3 perturbedCameraPose = cameraPose.compose(noise_pose);
  typename gtsam::traits<CALIBRATION>::TangentVector d;
  d.setRandom();
  d *= 0.1;
  CALIBRATION perturbedCalibration = camera.calibration().retract(d);
  return PinholeCamera<CALIBRATION>(perturbedCameraPose, perturbedCalibration);
}
Example #7
void Camera::CameraSensor::updateValue()
{
  // allocate buffer
  const unsigned int imageWidth = camera->imageWidth;
  const unsigned int imageHeight = camera->imageHeight;
  const unsigned int imageSize = imageWidth * imageHeight * 3;
  if(imageBufferSize < imageSize)
  {
    if(imageBuffer)
      delete[] imageBuffer;
    imageBuffer = new unsigned char[imageSize];
    imageBufferSize = imageSize;
  }

  // make sure the poses of all movable objects are up to date
  Simulation::simulation->scene->updateTransformations();

  // prepare offscreen renderer
  OffscreenRenderer& renderer = Simulation::simulation->renderer;
  renderer.makeCurrent(imageWidth, imageHeight);

  // setup image size and angle of view
  glViewport(0, 0, imageWidth, imageHeight);
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf(projection);
  glMatrixMode(GL_MODELVIEW);
  
  // enable lighting, textures, and smooth shading
  glEnable(GL_LIGHTING);
  glEnable(GL_TEXTURE_2D);
  glPolygonMode(GL_FRONT, GL_FILL);
  glShadeModel(GL_SMOOTH);

  // setup camera position
  Pose3<> pose = physicalObject->pose;
  pose.conc(offset);
  static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
  pose.rotate(cameraRotation);
  float transformation[16];
  OpenGLTools::convertTransformation(pose.invert(), transformation);
  glLoadMatrixf(transformation);
  
  // draw all objects
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
  Simulation::simulation->scene->drawAppearances();

  // read frame buffer
  renderer.finishImageRendering(imageBuffer, imageWidth, imageHeight);
  data.byteArray = imageBuffer;
}
Example #8
/* ************************************************************************* */
BearingS2 BearingS2::fromForwardObservation(const Pose3& A, const Point3& B) {
  //  Cnb = DCMnb(Att);
  Matrix Cnb = A.rotation().matrix().transpose();

  Vector p_rel_c = Cnb*(B - A.translation());

  // FIXME: the matlab code checks for p_rel_c(0) greater than

  //  azi = atan2(p_rel_c(2),p_rel_c(1));
  double azimuth = atan2(p_rel_c(1),p_rel_c(0));
  //  elev = atan2(p_rel_c(3),sqrt(p_rel_c(1)^2 + p_rel_c(2)^2));
  double elevation = atan2(p_rel_c(2),sqrt(p_rel_c(0) * p_rel_c(0) + p_rel_c(1) * p_rel_c(1)));
  return BearingS2(azimuth, elevation);
}
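With p = Cnb * (B - t_A) the target expressed in the body frame, the two angles computed above are simply

$$\text{azimuth} = \operatorname{atan2}(p_y,\, p_x), \qquad \text{elevation} = \operatorname{atan2}\!\bigl(p_z,\; \sqrt{p_x^2 + p_y^2}\bigr).$$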
Example #9
/* ************************************************************************* */
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) {
#ifdef GTSAM_POSE3_EXPMAP
  return Logmap(T, H);
#else
  Matrix3 DR;
  Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0);
  if (H) {
    *H = I_6x6;
    H->topLeftCorner<3,3>() = DR;
  }
  Vector6 xi;
  xi << omega, T.translation().vector();
  return xi;
#endif
}
/// Camera pair epipole (Projection of camera center 2 in the image plane 1)
static Vec3 epipole_from_P(const Mat34& P1, const Pose3& P2)
{
  const Vec3 c = P2.center();
  Vec4 center;
  center << c(0), c(1), c(2), 1.0;
  return P1*center;
}
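Written out, the epipole is the second camera centre in homogeneous coordinates projected by the first camera's projection matrix:

$$e_1 \;=\; P_1 \begin{bmatrix} C_2 \\ 1 \end{bmatrix}.$$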
Example #11
/* ************************************************************************* */
Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
  if (H) *H = LogmapDerivative(p);
  Vector3 w = Rot3::Logmap(p.rotation()), T = p.translation().vector();
  double t = w.norm();
  if (t < 1e-10) {
    Vector6 log;
    log << w, T;
    return log;
  } else {
    Matrix3 W = skewSymmetric(w / t);
    // Formula from Agrawal06iros, equation (14)
    // simplified with Mathematica, and multiplying in T to avoid matrix math
    double Tan = tan(0.5 * t);
    Vector3 WT = W * T;
    Vector3 u = T - (0.5 * t) * WT + (1 - t / (2. * Tan)) * (W * WT);
    Vector6 log;
    log << w, u;
    return log;
  }
}
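The translation part above is the closed form of equation (14) in Agrawal06iros, i.e. the inverse left Jacobian of SO(3) applied to the translation. With theta = ||w|| and W = [w/theta]_x, the code computes (stated here as a reading aid; this is the standard identity rather than a quote from the source)

$$u \;=\; \Bigl(I \;-\; \tfrac{\theta}{2}\, W \;+\; \bigl(1 - \tfrac{\theta}{2\tan(\theta/2)}\bigr) W^2\Bigr)\, T.$$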
Example #12
/* ************************************************************************* */
BearingS2 BearingS2::fromDownwardsObservation(const Pose3& A, const Point3& B) {
  //  Cnb = DCMnb(Att);
  Matrix Cnb = A.rotation().matrix().transpose();

  //  Cbc = [0,0,1;0,1,0;-1,0,0];
  Matrix Cbc = (Matrix(3,3) <<
      0.,0.,1.,
      0.,1.,0.,
      -1.,0.,0.).finished();
  //  p_rel_c = Cbc*Cnb*(PosObj - Pos);
  Vector p_rel_c = Cbc*Cnb*(B - A.translation());

  // FIXME: the matlab code checks for p_rel_c(0) greater than

  //  azi = atan2(p_rel_c(2),p_rel_c(1));
  double azimuth = atan2(p_rel_c(1),p_rel_c(0));
  //  elev = atan2(p_rel_c(3),sqrt(p_rel_c(1)^2 + p_rel_c(2)^2));
  double elevation = atan2(p_rel_c(2),sqrt(p_rel_c(0) * p_rel_c(0) + p_rel_c(1) * p_rel_c(1)));
  return BearingS2(azimuth, elevation);
}
Example #13
/* ************************************************************************* */
PoseRTV PoseRTV::transformed_from(const Pose3& trans, ChartJacobian Dglobal,
    OptionalJacobian<9, 6> Dtrans) const {

  // Pose3 transform is just compose
  Matrix6 D_newpose_trans, D_newpose_pose;
  Pose3 newpose = trans.compose(pose(), D_newpose_trans, D_newpose_pose);

  // Note that we rotate the velocity
  Matrix3 D_newvel_R, D_newvel_v;
  Velocity3 newvel = trans.rotation().rotate(Point3(velocity()), D_newvel_R, D_newvel_v);

  if (Dglobal) {
    Dglobal->setZero();
    Dglobal->topLeftCorner<6,6>() = D_newpose_pose;
    Dglobal->bottomRightCorner<3,3>() = D_newvel_v;
  }

  if (Dtrans) {
    Dtrans->setZero();
    Dtrans->topLeftCorner<6,6>() = D_newpose_trans;
    Dtrans->bottomLeftCorner<3,3>() = D_newvel_R;
  }
  return PoseRTV(newpose, newvel);
}
// Init a frustum for each valid view of the SfM scene
void Frustum_Filter::initFrustum
(
  const SfM_Data & sfm_data
)
{
  for (NearFarPlanesT::const_iterator it = z_near_z_far_perView.begin();
      it != z_near_z_far_perView.end(); ++it)
  {
    const View * view = sfm_data.GetViews().at(it->first).get();
    if (!sfm_data.IsPoseAndIntrinsicDefined(view))
      continue;
    Intrinsics::const_iterator iterIntrinsic = sfm_data.GetIntrinsics().find(view->id_intrinsic);
    if (!isPinhole(iterIntrinsic->second->getType()))
      continue;

    const Pose3 pose = sfm_data.GetPoseOrDie(view);

    const Pinhole_Intrinsic * cam = dynamic_cast<const Pinhole_Intrinsic*>(iterIntrinsic->second.get());
    if (!cam)
      continue;

    if (!_bTruncated) // use infinite frustum
    {
      const Frustum f(
        cam->w(), cam->h(), cam->K(),
        pose.rotation(), pose.center());
      frustum_perView[view->id_view] = f;
    }
    else // use truncated frustum with defined Near and Far planes
    {
      const Frustum f(cam->w(), cam->h(), cam->K(),
        pose.rotation(), pose.center(), it->second.first, it->second.second);
      frustum_perView[view->id_view] = f;
    }
  }
}
Example #15
int main()
{
  int n = 5000000;
  cout << "NOTE:  Times are reported for " << n << " calls" << endl;

  double norm=sqrt(1.0+16.0+4.0);
  double x=1.0/norm, y=4.0/norm, z=2.0/norm;
  Vector v = (Vector(6) << x, y, z, 0.1, 0.2, -0.1).finished();
  Pose3 T = Pose3::Expmap((Vector(6) << 0.1, 0.1, 0.2, 0.1, 0.4, 0.2).finished()), T2 = T.retract(v);
  Matrix H1,H2;

  TEST(retract, T.retract(v))
  TEST(Expmap, T*Pose3::Expmap(v))
  TEST(localCoordinates, T.localCoordinates(T2))
  TEST(between, T.between(T2))
  TEST(between_derivatives, T.between(T2,H1,H2))
  TEST(Logmap, Pose3::Logmap(T.between(T2)))

  // Print timings
  tictoc_print_();

  return 0;
}
Example #16
bool Camera::CameraSensor::renderCameraImages(SimRobotCore2::Sensor** cameras, unsigned int count)
{
  if(lastSimulationStep == Simulation::simulation->simulationStep)
    return true;

  // allocate buffer
  const unsigned int imageWidth = camera->imageWidth;
  const unsigned int imageHeight = camera->imageHeight;
  const unsigned int imageSize = imageWidth * imageHeight * 3;
  const unsigned int multiImageBufferSize = imageSize * count;
  if(imageBufferSize < multiImageBufferSize)
  {
    if(imageBuffer)
      delete[] imageBuffer;
    imageBuffer = new unsigned char[multiImageBufferSize];
    imageBufferSize = multiImageBufferSize;
  }

  // make sure the poses of all movable objects are up to date
  Simulation::simulation->scene->updateTransformations();

  // prepare offscreen renderer
  OffscreenRenderer& renderer = Simulation::simulation->renderer;
  renderer.makeCurrent(imageWidth, imageHeight * count);

  // setup angle of view
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf(projection);
  glMatrixMode(GL_MODELVIEW);
  
  // enable lighting, textures, and smooth shading
  glEnable(GL_LIGHTING);
  glEnable(GL_TEXTURE_2D);
  glPolygonMode(GL_FRONT, GL_FILL);
  glShadeModel(GL_SMOOTH);

  // clear buffers
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  // render images
  int currentHorizontalPos = 0;
  unsigned char* currentBufferPos = imageBuffer;
  for(unsigned int i = 0; i < count; ++i)
  {
    CameraSensor* sensor = (CameraSensor*)cameras[i];
    ASSERT(sensor->lastSimulationStep != Simulation::simulation->simulationStep);

    glViewport(0, currentHorizontalPos, imageWidth, imageHeight);

    // setup camera position
    Pose3<> pose = sensor->physicalObject->pose;
    pose.conc(sensor->offset);
    static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
    pose.rotate(cameraRotation);
    float transformation[16];
    OpenGLTools::convertTransformation(pose.invert(), transformation);
    glLoadMatrixf(transformation);
  
    // draw all objects
    Simulation::simulation->scene->drawAppearances();

    sensor->data.byteArray = currentBufferPos;
    sensor->lastSimulationStep = Simulation::simulation->simulationStep;

    currentHorizontalPos += imageHeight;
    currentBufferPos += imageSize;
  }

  // read frame buffer
  renderer.finishImageRendering(imageBuffer, imageWidth, imageHeight * count);
  return true;
}
 Vector evaluateError(const Pose3& q, boost::optional<Matrix&> H = boost::none) const
 {
   if (H)
     (*H) = (Matrix(1, 6) << 0.0, 0.0, 1.0, 0.0, 0.0, 0.0).finished();
   return (Vector(1) << (q.rotation().yaw() - yaw_)).finished();
 }
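This evaluateError is the body of a unary yaw factor on a Pose3 (the UnaryRotationFactor used by the state estimator later in this section presumably has this shape). Below is a minimal sketch of how such an error function is typically wrapped as a GTSAM unary factor; the class name, constructor, and member layout are illustrative assumptions, not the original declaration:

// Illustrative sketch (assumed names) of a unary yaw factor on Pose3.
class YawFactor : public NoiseModelFactor1<Pose3> {
  double yaw_;  // measured yaw, in radians
 public:
  YawFactor(Key key, double yaw, const SharedNoiseModel& model)
      : NoiseModelFactor1<Pose3>(model, key), yaw_(yaw) {}

  Vector evaluateError(const Pose3& q,
                       boost::optional<Matrix&> H = boost::none) const override {
    // error = yaw(q) - measured yaw; the Jacobian selects the z rotation axis
    if (H)
      (*H) = (Matrix(1, 6) << 0.0, 0.0, 1.0, 0.0, 0.0, 0.0).finished();
    return (Vector(1) << (q.rotation().yaw() - yaw_)).finished();
  }
};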
Example #18
 Pose3 operator () (const Pose3 & pose) const
 {
   return Pose3( pose.rotation() * _pose.rotation().transpose(), this->operator()(pose.center()));
 }
Example #19
/* OpenGL draw function & timing */
static void draw(void)
{
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  glMatrixMode(GL_MODELVIEW);
  glLoadIdentity();

  {
    // convert OpenGL coordinates into the document coordinate frame
    glPushMatrix();
    glMultMatrixf((GLfloat*)m_convert);

    // apply view offset
    openMVG::Mat4 offset_w = l2w_Camera(Mat3::Identity(), Vec3(x_offset,y_offset,z_offset));
    glMultMatrixd((GLdouble*)offset_w.data());

    // then apply current camera transformation
    const View * view = sfm_data.GetViews().at(vec_cameras[current_cam]).get();
    const Pose3 pose = sfm_data.GetPoseOrDie(view);
    const openMVG::Mat4 l2w = l2w_Camera(pose.rotation(), pose.translation());

    glPushMatrix();
    glMultMatrixd((GLdouble*)l2w.data());

    glPointSize(3);
    glDisable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);

    //Draw Structure in GREEN (as seen from the current camera)
    size_t nbPoint = sfm_data.GetLandmarks().size();
    size_t cpt = 0;
    glBegin(GL_POINTS);
    glColor3f(0.f,1.f,0.f);
    for (Landmarks::const_iterator iter = sfm_data.GetLandmarks().begin();
      iter != sfm_data.GetLandmarks().end(); ++iter)
    {
      const Landmark & landmark = iter->second;
      glVertex3d(landmark.X(0), landmark.X(1), landmark.X(2));
    }
    glEnd();

    glDisable(GL_CULL_FACE);

    for (int i_cam=0; i_cam < vec_cameras.size(); ++i_cam)
    {
      const View * view = sfm_data.GetViews().at(vec_cameras[i_cam]).get();
      const Pose3 pose = sfm_data.GetPoseOrDie(view);
      const IntrinsicBase * cam = sfm_data.GetIntrinsics().at(view->id_intrinsic).get();
      if (isPinhole(cam->getType()))
      {
        const Pinhole_Intrinsic * camPinhole = dynamic_cast<const Pinhole_Intrinsic*>(cam);

        // Move frame to draw the camera i_cam by applying its inverse transformation
        // Warning: translation has to be "fixed" to remove the current camera rotation

        // Fix camera_i translation with current camera rotation inverse
        const Vec3 trans = pose.rotation().transpose() * pose.translation();

        // compute inverse transformation matrix from local to world
        const openMVG::Mat4 l2w_i = l2w_Camera(pose.rotation().transpose(), -trans);
        // stack it and use it
        glPushMatrix();
        glMultMatrixd((GLdouble*)l2w_i.data());

        // 1. Draw optical center (RED) and image center (BLUE)
        glPointSize(3);
        glDisable(GL_TEXTURE_2D);
        glDisable(GL_LIGHTING);

        glBegin(GL_POINTS);
        glColor3f(1.f,0.f,0.f);
        glVertex3f(0, 0, 0); // optical center
        glColor3f(0.f,0.f,1.f);
        glVertex3f(0, 0, normalized_focal); // image center
        glEnd();

        // compute image corner coordinates with normalized focal length (f=normalized_focal)
        const int w = camPinhole->w();
        const int h = camPinhole->h();

        const double focal = camPinhole->focal();
        // use principal point to adjust image center
        const Vec2 pp = camPinhole->principal_point();

        Vec3 c1(    -pp[0]/focal * normalized_focal, (-pp[1]+h)/focal * normalized_focal, normalized_focal);
        Vec3 c2((-pp[0]+w)/focal * normalized_focal, (-pp[1]+h)/focal * normalized_focal, normalized_focal);
        Vec3 c3((-pp[0]+w)/focal * normalized_focal,     -pp[1]/focal * normalized_focal, normalized_focal);
        Vec3 c4(    -pp[0]/focal * normalized_focal,     -pp[1]/focal * normalized_focal, normalized_focal);

        // 2. Draw thumbnail
        if (i_cam == current_cam)
        {
          glEnable(GL_TEXTURE_2D);
          glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
          glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

          glBindTexture(GL_TEXTURE_2D, m_image_vector[i_cam].texture);

          glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
          glEnable(GL_BLEND);
          glDisable(GL_DEPTH_TEST);

          if (i_cam == current_cam) {
            glColor4f(0.5f,0.5f,0.5f, 0.7f);
          } else {
            glColor4f(0.5f,0.5f,0.5f, 0.5f);
          }

          glBegin(GL_QUADS);
          glTexCoord2d(0.0,1.0);    glVertex3d(c1[0], c1[1], c1[2]);
          glTexCoord2d(1.0,1.0);    glVertex3d(c2[0], c2[1], c2[2]);
          glTexCoord2d(1.0,0.0);    glVertex3d(c3[0], c3[1], c3[2]);
          glTexCoord2d(0.0,0.0);    glVertex3d(c4[0], c4[1], c4[2]);
          glEnd();

          glDisable(GL_TEXTURE_2D);
          glDisable(GL_BLEND); glEnable(GL_DEPTH_TEST);
        }

       // 3. Draw camera cone
        if (i_cam == current_cam) {
          glColor3f(1.f,1.f,0.f);
        } else {
          glColor3f(1.f,0.f,0.f);
        }
        glBegin(GL_LINES);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c1[0], c1[1], c1[2]);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c2[0], c2[1], c2[2]);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c3[0], c3[1], c3[2]);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c4[0], c4[1], c4[2]);
        glVertex3d(c1[0], c1[1], c1[2]); glVertex3d(c2[0], c2[1], c2[2]);
        glVertex3d(c2[0], c2[1], c2[2]); glVertex3d(c3[0], c3[1], c3[2]);
        glVertex3d(c3[0], c3[1], c3[2]); glVertex3d(c4[0], c4[1], c4[2]);
        glVertex3d(c4[0], c4[1], c4[2]); glVertex3d(c1[0], c1[1], c1[2]);
        glEnd();

        glPopMatrix(); // go back to current camera frame
      }
    }
    glPopMatrix(); // go back to (document +offset) frame
    glPopMatrix(); // go back to identity
  }
}
Example #20
bool CreateImageFile( const SfM_Data & sfm_data,
                      const std::string & sImagesFilename)
{
 /* images.txt
      # Image list with two lines of data per image:
      #   IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
      #   POINTS2D[] as (X, Y, POINT3D_ID)
      # Number of images: X, mean observations per image: Y
  */

  // Header
  std::ofstream images_file( sImagesFilename );

  if ( ! images_file )
  {
    std::cerr << "Cannot write file" << sImagesFilename << std::endl;
    return false;
  }
  images_file << "# Image list with two lines of data per image:\n";
  images_file << "#   IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n";
  images_file << "#   POINTS2D[] as (X, Y, POINT3D_ID)\n";
  images_file << "# Number of images: X, mean observations per image: Y\n";

  std::map< IndexT, std::vector< std::tuple<double, double, IndexT> > > viewIdToPoints2D;
  const Landmarks & landmarks = sfm_data.GetLandmarks();
  {
    for ( Landmarks::const_iterator iterLandmarks = landmarks.begin();
          iterLandmarks != landmarks.end(); ++iterLandmarks)
    {
      const IndexT point3d_id = iterLandmarks->first;

      // Tally set of feature observations
      const Observations & obs = iterLandmarks->second.obs;
      for ( Observations::const_iterator itObs = obs.begin(); itObs != obs.end(); ++itObs )
      {
        const IndexT currentViewId = itObs->first;
        const Observation & ob = itObs->second;
        viewIdToPoints2D[currentViewId].push_back(std::make_tuple(ob.x( 0 ), ob.x( 1 ), point3d_id));
      }
    }
  }

  {
    C_Progress_display my_progress_bar( sfm_data.GetViews().size(), std::cout, "\n- CREATE IMAGE FILE -\n" );

    for (Views::const_iterator iter = sfm_data.GetViews().begin();
         iter != sfm_data.GetViews().end(); ++iter, ++my_progress_bar)
    {
      const View * view = iter->second.get();

      if ( !sfm_data.IsPoseAndIntrinsicDefined( view ) )
      {
        continue;
      }

      const Pose3 pose = sfm_data.GetPoseOrDie( view );
      const Mat3 rotation = pose.rotation();
      const Vec3 translation = pose.translation();

      const double Tx = translation[0];
      const double Ty = translation[1];
      const double Tz = translation[2];
      Eigen::Quaterniond q( rotation );
      const double Qx = q.x();
      const double Qy = q.y();
      const double Qz = q.z();
      const double Qw = q.w();

      const IndexT image_id = view->id_view;
      // Colmap's camera_ids correspond to openMVG's intrinsic ids
      const IndexT camera_id = view->id_intrinsic;                           
      const std::string image_name = view->s_Img_path;

      // first line per image
      //IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
      images_file << image_id << " "
         << Qw << " "
         << Qx << " "
         << Qy << " "
         << Qz << " "
         << Tx << " "
         << Ty << " "
         << Tz << " "
         << camera_id << " "
         << image_name << " "     
         << "\n";

      // second line per image 
      //POINTS2D[] as (X, Y, POINT3D_ID)
      for (auto point2D: viewIdToPoints2D[image_id]) 
      {
        images_file << std::get<0>(point2D) << " " << 
        std::get<1>(point2D) << " " <<
        std::get<2>(point2D) << " ";
      }
      images_file << "\n";
    }
  }
  return true;
}
/// Robustly try to estimate the best 3D point using a ransac Scheme
/// Return true for a successful triangulation
bool SfM_Data_Structure_Computation_Robust::robust_triangulation(
  const SfM_Data & sfm_data,
  const Observations & obs,
  Vec3 & X,
  const IndexT min_required_inliers,
  const IndexT min_sample_index) const
{
  const double dThresholdPixel = 4.0; // TODO: make this parameter customizable

  const IndexT nbIter = obs.size(); // TODO: automatic computation of the number of iterations?

  // - Ransac variables
  Vec3 best_model;
  std::set<IndexT> best_inlier_set;
  double best_error = std::numeric_limits<double>::max();

  // - Ransac loop
  for (IndexT i = 0; i < nbIter; ++i)
  {
    std::vector<size_t> vec_samples;
    robust::UniformSample(min_sample_index, obs.size(), &vec_samples);
    const std::set<IndexT> samples(vec_samples.begin(), vec_samples.end());

    // Hypothesis generation.
    const Vec3 current_model = track_sample_triangulation(sfm_data, obs, samples);

    // Test validity of the hypothesis
    // - cheirality (for the samples)
    // - residual error

    // Cheirality (check that the point is in front of the sampled cameras)
    bool bChierality = true;
    for (auto& it : samples){
      Observations::const_iterator itObs = obs.begin();
      std::advance(itObs, it);
      const View * view = sfm_data.views.at(itObs->first).get();
      const IntrinsicBase * cam = sfm_data.GetIntrinsics().at(view->id_intrinsic).get();
      const Pose3 pose = sfm_data.GetPoseOrDie(view);
      const double z = pose.depth(current_model); // TODO: cam->depth(pose(X));
      bChierality &= z > 0;
    }

    if (!bChierality)
      continue;

    std::set<IndexT> inlier_set;
    double current_error = 0.0;
    // Classification as inlier/outlier according to pixel residual errors.
    for (Observations::const_iterator itObs = obs.begin();
        itObs != obs.end(); ++itObs)
    {
      const View * view = sfm_data.views.at(itObs->first).get();
      const IntrinsicBase * intrinsic = sfm_data.GetIntrinsics().at(view->id_intrinsic).get();
      const Pose3 pose = sfm_data.GetPoseOrDie(view);
      const Vec2 residual = intrinsic->residual(pose, current_model, itObs->second.x);
      const double residual_d = residual.norm();
      if (residual_d < dThresholdPixel)
      {
        inlier_set.insert(itObs->first);
        current_error += residual_d;
      }
      else
      {
        current_error += dThresholdPixel;
      }
    }
    // Keep the hypothesis if it is the best one seen so far and has sufficient inliers.
    if (current_error < best_error && inlier_set.size() >= min_required_inliers)
    {
      X = best_model = current_model;
      best_inlier_set = inlier_set;
      best_error = current_error;
    }
  }
  return !best_inlier_set.empty();
}
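Concretely, the loop scores each triangulation hypothesis with a capped (MSAC-style) cost

$$C(X) \;=\; \sum_{i} \min\bigl(\lVert r_i(X) \rVert,\; \tau\bigr), \qquad \tau = 4 \text{ px},$$

and keeps the lowest-cost hypothesis that passes the cheirality test and reaches min_required_inliers inliers.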
void DepthImageSensor::DistanceSensor::updateValue()
{
  // make sure the poses of all movable objects are up to date
  Simulation::simulation->scene->updateTransformations();
  
  OffscreenRenderer& renderer = Simulation::simulation->renderer;

  renderer.makeCurrent(renderWidth, renderHeight);
  glViewport(0, 0, renderWidth, renderHeight);

  // setup image size and angle of view
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf(projection);
  glMatrixMode(GL_MODELVIEW);
  
  // disable lighting and textures, and use flat shading
  glDisable(GL_LIGHTING);
  glDisable(GL_TEXTURE_2D);
  glPolygonMode(GL_FRONT, GL_FILL);
  glShadeModel(GL_FLAT);

  // setup camera position
  Pose3<> pose = physicalObject->pose;
  pose.conc(offset);
  static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
  pose.rotate(cameraRotation);
  pose.rotate(Matrix3x3<>(Vector3<>(0, (depthImageSensor->angleX - renderAngleX) / 2.0f, 0)));
  
  float* val = imageBuffer;
  unsigned int widthLeft = depthImageSensor->imageWidth;
  for(unsigned int i = 0; i < numOfBuffers; ++i)
  {
    float transformation[16];
    OpenGLTools::convertTransformation(pose.invert(), transformation);
    glLoadMatrixf(transformation);

    // disable color rendering
    glColorMask(0, 0, 0, 0);
    
    // draw all objects
    glClear(GL_DEPTH_BUFFER_BIT);
    Simulation::simulation->scene->drawAppearances();

    // enable color rendering again
    glColorMask(1, 1, 1, 1);

    // read frame buffer
    renderer.finishDepthRendering(renderBuffer, renderWidth, renderHeight);

    if(depthImageSensor->projection == perspectiveProjection)
    {
      // convert pixels to points in world and compute the depth (renderBuffer == imageBuffer)
      const float halfP34 = projection[14] * 0.5f;
      const float halfP33m1 = projection[10] * 0.5f - 0.5f;
      for(float* end = val + renderWidth * renderHeight; val < end; ++val)
        *val = halfP34 / (*val + halfP33m1);
    }
    else
    {
      // convert pixels to points in world and compute the distances (renderBuffer != imageBuffer)
      const float fInvSqr = 1.f / (projection[0] * projection[0]);
      const float halfP34 = projection[14] * 0.5f;
      const float halfP33m1 = projection[10] * 0.5f - 0.5f;
      float* const mid = lut[bufferWidth / 2];
      const float factor = 2.0f / float(renderWidth);
      const unsigned int end = std::min(bufferWidth, widthLeft);
      for(unsigned int i = 0; i < end; ++i)
      {
        const float vx = (lut[i] - mid) * factor;
        *val++ = std::min<float>(halfP34 / (*lut[i] + halfP33m1) * sqrt(1.f + vx * vx * fInvSqr), max);
      }
      widthLeft -= end;
      pose.rotate(Matrix3x3<>(Vector3<>(0, -renderAngleX, 0)));
    }
  }
}
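The constants halfP34 and halfP33m1 invert the depth buffer of a perspective projection: assuming the usual glFrustum-style matrix with P33 = projection[10] and P34 = projection[14], a depth-buffer value d in [0, 1] maps back to the positive camera-space depth as

$$z \;=\; \frac{P_{34}/2}{\,d + P_{33}/2 - \tfrac{1}{2}\,},$$

which is exactly halfP34 / (d + halfP33m1) as computed above.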
 karto::String StringHelper::ToString(const Pose3& rValue)
 {
   return rValue.ToString();
 }
int main(int argc, char **argv)
{
  CmdLine cmd;

  std::string sSfM_Data_Filename;
  std::string sOutDir = "";

  cmd.add( make_option('i', sSfM_Data_Filename, "sfmdata") );
  cmd.add( make_option('o', sOutDir, "outdir") );

  try {
      if (argc == 1) throw std::string("Invalid command line parameter.");
      cmd.process(argc, argv);
  } catch(const std::string& s) {
      std::cerr << "Usage: " << argv[0] << '\n'
      << "[-i|--sfmdata] filename, the SfM_Data file to convert\n"
      << "[-o|--outdir] path.\n"
      << std::endl;

      std::cerr << s << std::endl;
      return EXIT_FAILURE;
  }

  std::cout << " You called : " <<std::endl
            << argv[0] << std::endl
            << "--sfmdata " << sSfM_Data_Filename << std::endl
            << "--outdir " << sOutDir << std::endl;

  bool bOneHaveDisto = false;
  
  // Create output dir
  if (!stlplus::folder_exists(sOutDir))
    stlplus::folder_create( sOutDir );

  // Read the SfM scene
  SfM_Data sfm_data;
  if (!Load(sfm_data, sSfM_Data_Filename, ESfM_Data(VIEWS|INTRINSICS|EXTRINSICS))) {
    std::cerr << std::endl
      << "The input SfM_Data file \""<< sSfM_Data_Filename << "\" cannot be read." << std::endl;
    return EXIT_FAILURE;
  }

  for(Views::const_iterator iter = sfm_data.GetViews().begin();
      iter != sfm_data.GetViews().end(); ++iter)
  {
    const View * view = iter->second.get();
    if (!sfm_data.IsPoseAndIntrinsicDefined(view))
        continue;
    
    // Valid view, we can ask a pose & intrinsic data
    const Pose3 pose = sfm_data.GetPoseOrDie(view);
    Intrinsics::const_iterator iterIntrinsic = sfm_data.GetIntrinsics().find(view->id_intrinsic);
    const IntrinsicBase * cam = iterIntrinsic->second.get();
    
    if (!cameras::isPinhole(cam->getType()))
        continue;
    const Pinhole_Intrinsic * pinhole_cam = static_cast<const Pinhole_Intrinsic *>(cam);
    
    // Extrinsic
    const Vec3 t = pose.translation();
    const Mat3 R = pose.rotation();
    // Intrinsic
    const double f = pinhole_cam->focal();
    const Vec2 pp = pinhole_cam->principal_point();

    // Image size in px
    const int w = pinhole_cam->w();
    const int h = pinhole_cam->h();
    
    // We can now create the .cam file for the View in the output dir 
    std::ofstream outfile( stlplus::create_filespec(
                sOutDir, stlplus::basename_part(view->s_Img_path), "cam" ).c_str() );
    // See https://github.com/nmoehrle/mvs-texturing/blob/master/Arguments.cpp
    // for full specs
    const int largerDim = w > h ? w : h;
    outfile << t(0) << " " << t(1) << " " << t(2) << " "
        << R(0,0) << " " << R(0,1) << " " << R(0,2) << " "
        << R(1,0) << " " << R(1,1) << " " << R(1,2) << " "
        << R(2,0) << " " << R(2,1) << " " << R(2,2) << "\n"
        << f / largerDim << " 0 0 1 " << pp(0) / w << " " << pp(1) / h;
    outfile.close();
    
    if(cam->have_disto())
      bOneHaveDisto = true;
  }
  
  const string sUndistMsg = bOneHaveDisto ? "undistorted" : "";
  const string sQuitMsg = std::string("Your SfM_Data file was successfully converted!\n") +
	  "Now you can copy your " + sUndistMsg + " images in the \"" + sOutDir + "\" directory and run MVS Texturing";
  std::cout << sQuitMsg << std::endl;
  return EXIT_SUCCESS;
}
void StateEstimator::optimizationLoop()
{
  ISAM2Params parameters;
  // parameters.relinearizeThreshold = 0.0; // Set the relin threshold to zero such that the batch estimate is recovered
  // parameters.relinearizeSkip = 1; // Relinearize every time
  gtsam::IncrementalFixedLagSmoother graph(optLag_, parameters);

  double startTime;
  sensor_msgs::ImuConstPtr lastImu;
  double lastImuT;
  int imuKey = 1;
  int gpsKey = 1;

  // first we will initialize the graph with appropriate priors
  NonlinearFactorGraph newFactors;
  Values newVariables;
  FixedLagSmoother::KeyTimestampMap newTimestamps;

  sensor_msgs::NavSatFixConstPtr fix = gpsQ_.pop();
  startTime = ROS_TIME(fix);
  enu_.Reset(fix->latitude, fix->longitude, fix->altitude);

  sensor_msgs::ImuConstPtr imu = imuQ_.pop();
  lastImu = imu;
  lastImuT = ROS_TIME(imu) - 1 / imuFreq_;
  Rot3 initialOrientation =
      Rot3::Quaternion(imu->orientation.w, imu->orientation.x, imu->orientation.y, imu->orientation.z);

  // we set our initial position to the origin and assume we are stationary
  Pose3 x0(initialOrientation, Point3(0, 0, 0));
  PriorFactor<Pose3> priorPose(X(0), x0,
                               noiseModel::Diagonal::Sigmas((Vector(6) << priorOSigma_, priorOSigma_, priorOSigma_,
                                                             priorPSigma_, priorPSigma_, priorPSigma_)
                                                                .finished()));
  newFactors.add(priorPose);

  Vector3 v0 = Vector3(0, 0, 0);
  PriorFactor<Vector3> priorVel(
      V(0), v0, noiseModel::Diagonal::Sigmas((Vector(3) << priorVSigma_, priorVSigma_, priorVSigma_).finished()));
  newFactors.add(priorVel);

  imuBias::ConstantBias b0((Vector(6) << 0, 0, 0, 0, 0, 0).finished());
  PriorFactor<imuBias::ConstantBias> priorBias(
      B(0), b0,
      noiseModel::Diagonal::Sigmas(
          (Vector(6) << priorABias_, priorABias_, priorABias_, priorGBias_, priorGBias_, priorGBias_).finished()));
  newFactors.add(priorBias);

  noiseModel::Diagonal::shared_ptr imuToGpsFactorNoise = noiseModel::Diagonal::Sigmas(
      (Vector(6) << gpsTSigma_, gpsTSigma_, gpsTSigma_, gpsTSigma_, gpsTSigma_, gpsTSigma_).finished());
  newFactors.add(BetweenFactor<Pose3>(X(0), G(0), imuToGps_, imuToGpsFactorNoise));

  newVariables.insert(X(0), x0);
  newVariables.insert(V(0), v0);
  newVariables.insert(B(0), b0);
  newVariables.insert(G(0), x0.compose(imuToGps_));

  newTimestamps[X(0)] = 0;
  newTimestamps[G(0)] = 0;
  newTimestamps[V(0)] = 0;
  newTimestamps[B(0)] = 0;

  graph.update(newFactors, newVariables);  //, newTimestamps);

  Pose3 prevPose = prevPose_ = x0;
  Vector3 prevVel = prevVel_ = v0;
  imuBias::ConstantBias prevBias = prevBias_ = b0;

  // remove old imu messages
  while (!imuQ_.empty() && ROS_TIME(imuQ_.front()) < ROS_TIME(fix))
  {
    lastImuT = ROS_TIME(lastImu);
    lastImu = imuQ_.pop();
  }

  // setting up the IMU integration
  boost::shared_ptr<gtsam::PreintegrationParams> preintegrationParams =
      PreintegrationParams::MakeSharedU(gravityMagnitude_);
  preintegrationParams->accelerometerCovariance = accelSigma_ * I_3x3;
  preintegrationParams->gyroscopeCovariance = gyroSigma_ * I_3x3;
  preintegrationParams->integrationCovariance = imuIntSigma_ * I_3x3;

  PreintegratedImuMeasurements imuIntegrator(preintegrationParams, prevBias);

  Vector noiseModelBetweenBias =
      (Vector(6) << accelBSigma_, accelBSigma_, accelBSigma_, gyroBSigma_, gyroBSigma_, gyroBSigma_).finished();
  SharedDiagonal gpsNoise = noiseModel::Diagonal::Sigmas(Vector3(gpsSigma_, gpsSigma_, 3 * gpsSigma_));

  newFactors.resize(0);
  newVariables.clear();
  newTimestamps.clear();

  // now we loop and use the queues to grab messages
  while (ros::ok())
  {
    bool optimize = false;

    // integrate imu messages
    while (!imuQ_.empty() && ROS_TIME(imuQ_.back()) > (startTime + 0.1 * imuKey) && !optimize)
    {
      double curTime = startTime + 0.1 * imuKey;
      // we reset the integrator, then integrate
      imuIntegrator.resetIntegrationAndSetBias(prevBias);

      while (ROS_TIME(lastImu) < curTime)
      {
        double dt = ROS_TIME(lastImu) - lastImuT;
        imuIntegrator.integrateMeasurement(
            Vector3(lastImu->linear_acceleration.x, lastImu->linear_acceleration.y, lastImu->linear_acceleration.z),
            Vector3(lastImu->angular_velocity.x, lastImu->angular_velocity.y, lastImu->angular_velocity.z), dt);
        lastImuT = ROS_TIME(lastImu);
        lastImu = imuQ_.pop();
      }

      // now put this into the graph
      ImuFactor imuf(X(imuKey - 1), V(imuKey - 1), X(imuKey), V(imuKey), B(imuKey - 1), imuIntegrator);
      newFactors.add(imuf);
      newFactors.add(BetweenFactor<imuBias::ConstantBias>(
          B(imuKey - 1), B(imuKey), imuBias::ConstantBias(),
          noiseModel::Diagonal::Sigmas(sqrt(imuIntegrator.deltaTij()) * noiseModelBetweenBias)));

      Rot3 orientation = Rot3::Quaternion(lastImu->orientation.w, lastImu->orientation.x, lastImu->orientation.y,
                                          lastImu->orientation.z);
      // std::cout << "adding orientation: " << orientation.xyz() << std::endl;
      newFactors.add(UnaryRotationFactor(X(imuKey), orientation.yaw(),
                                         noiseModel::Diagonal::Sigmas((Vector(1) << yawSigma_).finished())));

      NavState cur(prevPose, prevVel);
      NavState next = imuIntegrator.predict(cur, prevBias);
      prevPose = next.pose();
      prevVel = next.v();
      newVariables.insert(X(imuKey), prevPose);
      newVariables.insert(G(imuKey), prevPose.compose(imuToGps_));
      newVariables.insert(V(imuKey), prevVel);
      newVariables.insert(B(imuKey), prevBias);

      Pose3 temp = prevPose.compose(imuToGps_);
      std::cout << "imu(" << imuKey << "): " << temp.x() << " " << temp.y() << " " << temp.z() << std::endl;

      // for marginalizing out past the time window
      newTimestamps[X(imuKey)] = 0.1 * imuKey;
      newTimestamps[G(imuKey)] = 0.1 * imuKey;
      newTimestamps[V(imuKey)] = 0.1 * imuKey;
      newTimestamps[B(imuKey)] = 0.1 * imuKey;
      ++imuKey;
      optimize = true;
    }

    while (!gpsQ_.empty() && gpsKey < imuKey && optimize && ROS_TIME(gpsQ_.back()) > (startTime + gpsKey * 0.1))
    {
      fix = gpsQ_.pop();
      // we don't want all gps messages, just ones that are very close to the factors (10 hz)
      if (std::abs(ROS_TIME(fix) - (startTime + gpsKey * 0.1)) > 1e-2)
        continue;

      double E, N, U;
      enu_.Forward(fix->latitude, fix->longitude, fix->altitude, E, N, U);
      // we should maybe do a check on the GPS to make sure it's valid
      newFactors.add(GPSFactor(G(gpsKey), Point3(E, N, U), gpsNoise));
      newFactors.add(BetweenFactor<Pose3>(X(gpsKey), G(gpsKey), imuToGps_, imuToGpsFactorNoise));
      std::cout << "gps(" << gpsKey << "): " << E << " " << N << " " << U << std::endl;
      ++gpsKey;
    }

    if (!optimize)
      continue;

    try
    {
      graph.update(newFactors, newVariables);  //, newTimestamps);

      prevPose = graph.calculateEstimate<Pose3>(X(imuKey - 1));
      prevVel = graph.calculateEstimate<Vector3>(V(imuKey - 1));
      prevBias = graph.calculateEstimate<imuBias::ConstantBias>(B(imuKey - 1));

      // pass this to the other thread
      {
        std::lock_guard<std::mutex> lock(mutex_);
        prevPose_ = prevPose;
        prevVel_ = prevVel;
        prevBias_ = prevBias;
        currentTime_ = (imuKey - 1) * 0.1 + startTime;
        doneFirstOpt_ = true;
      }
    }
    catch (const IndeterminantLinearSystemException& ex)
    {
      // optimization blew up; not much to do except warn the user
      ROS_ERROR("Indeterminant linear system error");
    }

    newFactors.resize(0);
    newVariables.clear();
    newTimestamps.clear();
  }
}
int main(int argc, char** argv){

  Values initial_estimate;
  NonlinearFactorGraph graph;
  const noiseModel::Isotropic::shared_ptr model = noiseModel::Isotropic::Sigma(3,1);

  string calibration_loc = findExampleDataFile("VO_calibration.txt");
  string pose_loc = findExampleDataFile("VO_camera_poses_large.txt");
  string factor_loc = findExampleDataFile("VO_stereo_factors_large.txt");
  
  //read camera calibration info from file
  // focal lengths fx, fy, skew s, principal point u0, v0, baseline b
  double fx, fy, s, u0, v0, b;
  ifstream calibration_file(calibration_loc.c_str());
  cout << "Reading calibration info" << endl;
  calibration_file >> fx >> fy >> s >> u0 >> v0 >> b;

  //create stereo camera calibration object
  const Cal3_S2Stereo::shared_ptr K(new Cal3_S2Stereo(fx,fy,s,u0,v0,b));
  
  ifstream pose_file(pose_loc.c_str());
  cout << "Reading camera poses" << endl;
  int pose_id;
  MatrixRowMajor m(4,4);
  //read camera pose parameters and use to make initial estimates of camera poses
  while (pose_file >> pose_id) {
    for (int i = 0; i < 16; i++) {
      pose_file >> m.data()[i];
    }
    initial_estimate.insert(Symbol('x', pose_id), Pose3(m));
  }
  
  // camera and landmark keys
  size_t x, l;

  // pixel coordinates uL, uR, v (same for left/right images due to rectification)
  // landmark coordinates X, Y, Z in camera frame, resulting from triangulation
  double uL, uR, v, X, Y, Z;
  ifstream factor_file(factor_loc.c_str());
  cout << "Reading stereo factors" << endl;
  //read stereo measurement details from file and use to create and add GenericStereoFactor objects to the graph representation
  while (factor_file >> x >> l >> uL >> uR >> v >> X >> Y >> Z) {
    graph.push_back(
        GenericStereoFactor<Pose3, Point3>(StereoPoint2(uL, uR, v), model,
            Symbol('x', x), Symbol('l', l), K));
    //if the landmark variable included in this factor has not yet been added to the initial variable value estimate, add it
    if (!initial_estimate.exists(Symbol('l', l))) {
      Pose3 camPose = initial_estimate.at<Pose3>(Symbol('x', x));
      //transform_from() transforms the input Point3 from the camera pose space, camPose, to the global space
      Point3 worldPoint = camPose.transform_from(Point3(X, Y, Z));
      initial_estimate.insert(Symbol('l', l), worldPoint);
    }
  }

  Pose3 first_pose = initial_estimate.at<Pose3>(Symbol('x',1));
  //constrain the first pose such that it cannot change from its original value during optimization
  // NOTE: NonlinearEquality forces the optimizer to use QR rather than Cholesky
  // QR is much slower than Cholesky, but numerically more stable
  graph.push_back(NonlinearEquality<Pose3>(Symbol('x',1),first_pose));

  cout << "Optimizing" << endl;
  //create Levenberg-Marquardt optimizer to optimize the factor graph
  LevenbergMarquardtOptimizer optimizer = LevenbergMarquardtOptimizer(graph, initial_estimate);
  Values result = optimizer.optimize();

  cout << "Final result sample:" << endl;
  Values pose_values = result.filter<Pose3>();
  pose_values.print("Final camera poses:\n");

  return 0;
}
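For reference, the landmark coordinates X, Y, Z stored in the factor file correspond to the usual rectified-stereo backprojection of a measurement (uL, uR, v); ignoring skew, and with baseline b and principal point (u0, v0), that relation is

$$Z = \frac{f_x\, b}{u_L - u_R}, \qquad X = \frac{(u_L - u_0)\, Z}{f_x}, \qquad Y = \frac{(v - v_0)\, Z}{f_y}.$$

This is only the standard relation implied by the comments above; the example itself reads the precomputed values directly from the file.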
Example #27
/* ************************************************************************* */
Pose3 Pose3::transform_to(const Pose3& pose) const {
  Rot3 cRv = R_ * Rot3(pose.R_.inverse());
  Point3 t = pose.transform_to(t_);
  return Pose3(cRv, t);
}
Example #28
int main() {

  const std::string sInputDir = stlplus::folder_up(string(THIS_SOURCE_DIR))
    + "/imageData/SceauxCastle/";
  const string jpg_filenameL = sInputDir + "100_7101.jpg";
  const string jpg_filenameR = sInputDir + "100_7102.jpg";

  Image<unsigned char> imageL, imageR;
  ReadImage(jpg_filenameL.c_str(), &imageL);
  ReadImage(jpg_filenameR.c_str(), &imageR);

  //--
  // Detect regions thanks to an image_describer
  //--
  using namespace openMVG::features;
  std::unique_ptr<Image_describer> image_describer(new SIFT_Image_describer);
  std::map<IndexT, std::unique_ptr<features::Regions> > regions_perImage;
  image_describer->Describe(imageL, regions_perImage[0]);
  image_describer->Describe(imageR, regions_perImage[1]);

  const SIFT_Regions* regionsL = dynamic_cast<SIFT_Regions*>(regions_perImage.at(0).get());
  const SIFT_Regions* regionsR = dynamic_cast<SIFT_Regions*>(regions_perImage.at(1).get());

  const PointFeatures
    featsL = regions_perImage.at(0)->GetRegionsPositions(),
    featsR = regions_perImage.at(1)->GetRegionsPositions();

  // Show both images side by side
  {
    Image<unsigned char> concat;
    ConcatH(imageL, imageR, concat);
    string out_filename = "01_concat.jpg";
    WriteImage(out_filename.c_str(), concat);
  }

  //- Draw features on the two image (side by side)
  {
    Image<unsigned char> concat;
    ConcatH(imageL, imageR, concat);

    //-- Draw features :
    for (size_t i=0; i < featsL.size(); ++i )  {
      const SIOPointFeature point = regionsL->Features()[i];
      DrawCircle(point.x(), point.y(), point.scale(), 255, &concat);
    }
    for (size_t i=0; i < featsR.size(); ++i )  {
      const SIOPointFeature point = regionsR->Features()[i];
      DrawCircle(point.x()+imageL.Width(), point.y(), point.scale(), 255, &concat);
    }
    string out_filename = "02_features.jpg";
    WriteImage(out_filename.c_str(), concat);
  }

  std::vector<IndMatch> vec_PutativeMatches;
  //-- Perform matching -> find Nearest neighbor, filtered with Distance ratio
  {
    // Define a matcher and a metric to find corresponding points
    typedef SIFT_Regions::DescriptorT DescriptorT;
    typedef L2_Vectorized<DescriptorT::bin_type> Metric;
    typedef ArrayMatcherBruteForce<DescriptorT::bin_type, Metric> MatcherT;
    // Distance ratio squared due to squared metric
    getPutativesMatches<DescriptorT, MatcherT>(
      ((SIFT_Regions*)regions_perImage.at(0).get())->Descriptors(),
      ((SIFT_Regions*)regions_perImage.at(1).get())->Descriptors(),
      Square(0.8), vec_PutativeMatches);

    IndMatchDecorator<float> matchDeduplicator(
            vec_PutativeMatches, featsL, featsR);
    matchDeduplicator.getDeduplicated(vec_PutativeMatches);

    std::cout
      << regions_perImage.at(0)->RegionCount() << " #Features on image A" << std::endl
      << regions_perImage.at(1)->RegionCount() << " #Features on image B" << std::endl
      << vec_PutativeMatches.size() << " #matches with Distance Ratio filter" << std::endl;

    // Draw correspondences after Nearest Neighbor ratio filter
    svgDrawer svgStream( imageL.Width() + imageR.Width(), max(imageL.Height(), imageR.Height()));
    svgStream.drawImage(jpg_filenameL, imageL.Width(), imageL.Height());
    svgStream.drawImage(jpg_filenameR, imageR.Width(), imageR.Height(), imageL.Width());
    for (size_t i = 0; i < vec_PutativeMatches.size(); ++i) {
      //Get back linked feature, draw a circle and link them by a line
      const SIOPointFeature L = regionsL->Features()[vec_PutativeMatches[i]._i];
      const SIOPointFeature R = regionsR->Features()[vec_PutativeMatches[i]._j];
      svgStream.drawLine(L.x(), L.y(), R.x()+imageL.Width(), R.y(), svgStyle().stroke("green", 2.0));
      svgStream.drawCircle(L.x(), L.y(), L.scale(), svgStyle().stroke("yellow", 2.0));
      svgStream.drawCircle(R.x()+imageL.Width(), R.y(), R.scale(),svgStyle().stroke("yellow", 2.0));
    }
    const std::string out_filename = "03_siftMatches.svg";
    std::ofstream svgFile( out_filename.c_str() );
    svgFile << svgStream.closeSvgFile().str();
    svgFile.close();
  }

  // Essential geometry filtering of putative matches
  {
    Mat3 K;
    //read K from file
    if (!readIntrinsic(stlplus::create_filespec(sInputDir,"K","txt"), K))
    {
      std::cerr << "Cannot read intrinsic parameters." << std::endl;
      return EXIT_FAILURE;
    }

    //A. prepare the corresponding putative points
    Mat xL(2, vec_PutativeMatches.size());
    Mat xR(2, vec_PutativeMatches.size());
    for (size_t k = 0; k < vec_PutativeMatches.size(); ++k)  {
      const PointFeature & imaL = featsL[vec_PutativeMatches[k]._i];
      const PointFeature & imaR = featsR[vec_PutativeMatches[k]._j];
      xL.col(k) = imaL.coords().cast<double>();
      xR.col(k) = imaR.coords().cast<double>();
    }

    //B. Compute the relative pose thanks to an essential matrix estimation
    std::pair<size_t, size_t> size_imaL(imageL.Width(), imageL.Height());
    std::pair<size_t, size_t> size_imaR(imageR.Width(), imageR.Height());
    sfm::RelativePose_Info relativePose_info;
    if (!sfm::robustRelativePose(K, K, xL, xR, relativePose_info, size_imaL, size_imaR, 256))
    {
      std::cerr << " /!\\ Robust relative pose estimation failure."
        << std::endl;
      return EXIT_FAILURE;
    }

    std::cout << "\nFound an Essential matrix:\n"
      << "\tprecision: " << relativePose_info.found_residual_precision << " pixels\n"
      << "\t#inliers: " << relativePose_info.vec_inliers.size() << "\n"
      << "\t#matches: " << vec_PutativeMatches.size()
      << std::endl;

    // Show Essential-validated points
    svgDrawer svgStream( imageL.Width() + imageR.Width(), max(imageL.Height(), imageR.Height()));
    svgStream.drawImage(jpg_filenameL, imageL.Width(), imageL.Height());
    svgStream.drawImage(jpg_filenameR, imageR.Width(), imageR.Height(), imageL.Width());
    for (size_t i = 0; i < relativePose_info.vec_inliers.size(); ++i)  {
      const SIOPointFeature & LL = regionsL->Features()[vec_PutativeMatches[relativePose_info.vec_inliers[i]]._i];
      const SIOPointFeature & RR = regionsR->Features()[vec_PutativeMatches[relativePose_info.vec_inliers[i]]._j];
      const Vec2f L = LL.coords();
      const Vec2f R = RR.coords();
      svgStream.drawLine(L.x(), L.y(), R.x()+imageL.Width(), R.y(), svgStyle().stroke("green", 2.0));
      svgStream.drawCircle(L.x(), L.y(), LL.scale(), svgStyle().stroke("yellow", 2.0));
      svgStream.drawCircle(R.x()+imageL.Width(), R.y(), RR.scale(),svgStyle().stroke("yellow", 2.0));
    }
    const std::string out_filename = "04_ACRansacEssential.svg";
    std::ofstream svgFile( out_filename.c_str() );
    svgFile << svgStream.closeSvgFile().str();
    svgFile.close();

    //C. Triangulate and export inliers as a PLY scene
    std::vector<Vec3> vec_3DPoints;

    // Setup camera intrinsic and poses
    Pinhole_Intrinsic intrinsic0(imageL.Width(), imageL.Height(), K(0, 0), K(0, 2), K(1, 2));
    Pinhole_Intrinsic intrinsic1(imageR.Width(), imageR.Height(), K(0, 0), K(0, 2), K(1, 2));

    const Pose3 pose0 = Pose3(Mat3::Identity(), Vec3::Zero());
    const Pose3 pose1 = relativePose_info.relativePose;

    // Init structure by inlier triangulation
    const Mat34 P1 = intrinsic0.get_projective_equivalent(pose0);
    const Mat34 P2 = intrinsic1.get_projective_equivalent(pose1);
    std::vector<double> vec_residuals;
    vec_residuals.reserve(relativePose_info.vec_inliers.size() * 4);
    for (size_t i = 0; i < relativePose_info.vec_inliers.size(); ++i)  {
      const SIOPointFeature & LL = regionsL->Features()[vec_PutativeMatches[relativePose_info.vec_inliers[i]]._i];
      const SIOPointFeature & RR = regionsR->Features()[vec_PutativeMatches[relativePose_info.vec_inliers[i]]._j];
      // Point triangulation
      Vec3 X;
      TriangulateDLT(P1, LL.coords().cast<double>(), P2, RR.coords().cast<double>(), &X);
      // Reject point that is behind the camera
      if (pose0.depth(X) < 0 && pose1.depth(X) < 0)
        continue;
      const Vec2 residual0 = intrinsic0.residual(pose0, X, LL.coords().cast<double>());
      const Vec2 residual1 = intrinsic1.residual(pose1, X, RR.coords().cast<double>());
      vec_residuals.push_back(fabs(residual0(0)));
      vec_residuals.push_back(fabs(residual0(1)));
      vec_residuals.push_back(fabs(residual1(0)));
      vec_residuals.push_back(fabs(residual1(1)));
      // keep the triangulated point so it is included in the PLY export below
      vec_3DPoints.push_back(X);
    }

    // Display some statistics of reprojection errors
    float dMin, dMax, dMean, dMedian;
    minMaxMeanMedian<float>(vec_residuals.begin(), vec_residuals.end(),
      dMin, dMax, dMean, dMedian);

    std::cout << std::endl
      << "Triangulation residuals statistics:" << "\n"
      << "\t-- Residual min:\t" << dMin << "\n"
      << "\t-- Residual median:\t" << dMedian << "\n"
      << "\t-- Residual max:\t " << dMax << "\n"
      << "\t-- Residual mean:\t " << dMean << std::endl;

    // Export as PLY (camera pos + 3Dpoints)
    std::vector<Vec3> vec_camPos;
    vec_camPos.push_back( pose0.center() );
    vec_camPos.push_back( pose1.center() );
    exportToPly(vec_3DPoints, vec_camPos, "EssentialGeometry.ply");
  }
  return EXIT_SUCCESS;
}