//***************************************************************************
TEST(GPSData, init) {

  // GPS Reading 1 will be ENU origin
  double t1 = 84831;
  Point3 NED1(0, 0, 0);
  LocalCartesian enu(35.4393283333333, -119.062986666667, 275.54,
      Geocentric::WGS84);

  // GPS Reading 2
  double t2 = 84831.5;
  double E, N, U;
  enu.Forward(35.4394633333333, -119.063146666667, 276.52, E, N, U);
  Point3 NED2(N, E, -U);

  // Estimate initial state
  Pose3 T;
  Vector3 nV;
  boost::tie(T, nV) = GPSFactor::EstimateState(t1, NED1, t2, NED2, 84831.0796);

  // Check values
  EXPECT(assert_equal((Vector)Vector3(29.9575, -29.0564, -1.95993), nV, 1e-4));
  EXPECT(assert_equal(Rot3::ypr(-0.770131, 0.046928, 0), T.rotation(), 1e-5));
  Point3 expectedT(2.38461, -2.31289, -0.156011);
  EXPECT(assert_equal(expectedT, T.translation(), 1e-5));
}
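As a sanity check (not the GTSAM implementation), the expected values above are consistent with a constant-velocity reading of EstimateState: velocity from the finite difference of the two fixes, position by linear interpolation to the query time, and yaw/pitch from the velocity direction. A minimal sketch using only the numbers quoted in the test (checkGpsInitArithmetic is a hypothetical helper):

#include <cmath>

static void checkGpsInitArithmetic() {
  const double vN = 29.9575, vE = -29.0564, vD = -1.95993;   // expected NED velocity
  const double dt = 84831.0796 - 84831.0;                    // query time minus t1
  const double pN = dt * vN, pE = dt * vE, pD = dt * vD;     // ~ (2.38461, -2.31289, -0.156011)
  const double yaw = std::atan2(vE, vN);                     // ~ -0.770131
  const double pitch = std::atan2(-vD, std::hypot(vN, vE));  // ~ 0.046928
  (void)pN; (void)pE; (void)pD; (void)yaw; (void)pitch;      // silence unused warnings
}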
/* ************************************************************************* */
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1,
                    OptionalJacobian<1, 6> H2) const {
  Matrix13 D_local_point;
  double r = range(pose.translation(), H1, H2 ? &D_local_point : 0);
  // Chain rule: to first order, the other pose's rotation does not move its
  // translation (hence the zero block); its translation increment is expressed
  // in that pose's frame, hence the multiplication by its rotation matrix.
  if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
  return r;
}
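To validate these analytic Jacobians, one option is to compare them against numerical derivatives; the following is a sketch only, assuming GTSAM's numericalDerivative21/22 helpers (gtsam/base/numericalDerivative.h) and the CppUnitLite-style EXPECT/assert_equal used in the first example:

TEST(Pose3, rangeJacobiansSketch) {
  Pose3 p1(Rot3::ypr(0.1, 0.2, 0.3), Point3(1, 2, 3));
  Pose3 p2(Rot3::ypr(-0.2, 0.1, 0.0), Point3(4, 5, 6));

  Eigen::Matrix<double, 1, 6> H1, H2;
  p1.range(p2, H1, H2);

  // Numerical derivatives of the range-only call, for comparison.
  Matrix eH1 = numericalDerivative21<double, Pose3, Pose3>(
      [](const Pose3& a, const Pose3& b) { return a.range(b); }, p1, p2);
  Matrix eH2 = numericalDerivative22<double, Pose3, Pose3>(
      [](const Pose3& a, const Pose3& b) { return a.range(b); }, p1, p2);
  EXPECT(assert_equal(eH1, (Matrix)H1, 1e-5));
  EXPECT(assert_equal(eH2, (Matrix)H2, 1e-5));
}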
void Frustum_Filter::init_z_near_z_far_depth
(
  const SfM_Data & sfm_data,
  const double zNear,
  const double zFar
)
{
  // If z_near & z_far are -1 and the structure is not empty,
  //  compute the values for each camera from the structure
  const bool bComputed_Z = (zNear == -1. && zFar == -1.) && !sfm_data.structure.empty();
  if (bComputed_Z)  // Compute the near & far planes from the structure and view observations
  {
    for (Landmarks::const_iterator itL = sfm_data.GetLandmarks().begin();
      itL != sfm_data.GetLandmarks().end(); ++itL)
    {
      const Landmark & landmark = itL->second;
      const Vec3 & X = landmark.X;
      for (Observations::const_iterator iterO = landmark.obs.begin();
        iterO != landmark.obs.end(); ++iterO)
      {
        const IndexT id_view = iterO->first;
        const View * view = sfm_data.GetViews().at(id_view).get();
        if (!sfm_data.IsPoseAndIntrinsicDefined(view))
          continue;

        const Pose3 pose = sfm_data.GetPoseOrDie(view);
        const double z = Depth(pose.rotation(), pose.translation(), X);
        NearFarPlanesT::iterator itZ = z_near_z_far_perView.find(id_view);
        if (itZ != z_near_z_far_perView.end())
        {
          if ( z < itZ->second.first)
            itZ->second.first = z;
          else
          if ( z > itZ->second.second)
            itZ->second.second = z;
        }
        else
          z_near_z_far_perView[id_view] = {z,z};
      }
    }
  }
  else
  {
    // Init the same near & far limits for all the valid views
    for (Views::const_iterator it = sfm_data.GetViews().begin();
    it != sfm_data.GetViews().end(); ++it)
    {
      const View * view = it->second.get();
      if (!sfm_data.IsPoseAndIntrinsicDefined(view))
        continue;
      if (z_near_z_far_perView.find(view->id_view) == z_near_z_far_perView.end())
        z_near_z_far_perView[view->id_view] = {zNear, zFar};
    }
  }
}
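For context, Depth() above is assumed to return the depth of the landmark along the camera's optical axis (openMVG declares it in multiview/projection.hpp); a sketch of that assumed behaviour, with a hypothetical name so it does not clash with the real function:

// Assumed behaviour of Depth(): z-coordinate of X expressed in the camera frame.
inline double Depth_sketch(const Mat3 & R, const Vec3 & t, const Vec3 & X)
{
  return (R * X)(2) + t(2);  // third component of R*X + t
}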
/* ************************************************************************* */
BearingS2 BearingS2::fromForwardObservation(const Pose3& A, const Point3& B) {
  //  Cnb = DCMnb(Att);
  Matrix Cnb = A.rotation().matrix().transpose();

  Vector p_rel_c = Cnb*(B - A.translation());

  // FIXME: the matlab code checks for p_rel_c(0) greater than

  //  azi = atan2(p_rel_c(2),p_rel_c(1));
  double azimuth = atan2(p_rel_c(1),p_rel_c(0));
  //  elev = atan2(p_rel_c(3),sqrt(p_rel_c(1)^2 + p_rel_c(2)^2));
  double elevation = atan2(p_rel_c(2),sqrt(p_rel_c(0) * p_rel_c(0) + p_rel_c(1) * p_rel_c(1)));
  return BearingS2(azimuth, elevation);
}
/* ************************************************************************* */
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) {
#ifdef GTSAM_POSE3_EXPMAP
  return Logmap(T, H);
#else
  Matrix3 DR;
  Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0);
  if (H) {
    *H = I_6x6;
    H->topLeftCorner<3,3>() = DR;
  }
  Vector6 xi;
  xi << omega, T.translation().vector();
  return xi;
#endif
}
/* ************************************************************************* */
BearingS2 BearingS2::fromDownwardsObservation(const Pose3& A, const Point3& B) {
  //  Cnb = DCMnb(Att);
  Matrix Cnb = A.rotation().matrix().transpose();

  //  Cbc = [0,0,1;0,1,0;-1,0,0];
  Matrix Cbc = (Matrix(3,3) <<
      0.,0.,1.,
      0.,1.,0.,
      -1.,0.,0.).finished();
  //  p_rel_c = Cbc*Cnb*(PosObj - Pos);
  Vector p_rel_c = Cbc*Cnb*(B - A.translation());

  // FIXME: the matlab code checks for p_rel_c(0) greater than

  //  azi = atan2(p_rel_c(2),p_rel_c(1));
  double azimuth = atan2(p_rel_c(1),p_rel_c(0));
  //  elev = atan2(p_rel_c(3),sqrt(p_rel_c(1)^2 + p_rel_c(2)^2));
  double elevation = atan2(p_rel_c(2),sqrt(p_rel_c(0) * p_rel_c(0) + p_rel_c(1) * p_rel_c(1)));
  return BearingS2(azimuth, elevation);
}
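Both bearing constructors convert the relative vector \(p = (x, y, z)\), expressed in the observing frame, into spherical angles:

\[
\mathrm{azi} = \operatorname{atan2}(y,\, x), \qquad
\mathrm{elev} = \operatorname{atan2}\bigl(z,\, \sqrt{x^{2} + y^{2}}\bigr).
\]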
/* ************************************************************************* */
Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
  if (H) *H = LogmapDerivative(p);
  Vector3 w = Rot3::Logmap(p.rotation()), T = p.translation().vector();
  double t = w.norm();
  if (t < 1e-10) {
    Vector6 log;
    log << w, T;
    return log;
  } else {
    Matrix3 W = skewSymmetric(w / t);
    // Formula from Agrawal06iros, equation (14)
    // simplified with Mathematica, and multiplying in T to avoid matrix math
    double Tan = tan(0.5 * t);
    Vector3 WT = W * T;
    Vector3 u = T - (0.5 * t) * WT + (1 - t / (2. * Tan)) * (W * WT);
    Vector6 log;
    log << w, u;
    return log;
  }
}
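For reference, the translational part computed above applies the inverse left Jacobian of SO(3) to the translation. With \(\theta = \lVert\omega\rVert\) and \(\Omega = [\omega/\theta]_{\times}\), the closed form implemented by the code (the simplified Agrawal06iros equation (14) cited in the comment) is

\[
u = \Bigl(I - \tfrac{\theta}{2}\,\Omega + \Bigl(1 - \tfrac{\theta}{2\tan(\theta/2)}\Bigr)\Omega^{2}\Bigr)\, T .
\]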
/* OpenGL draw function & timing */
static void draw(void)
{
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  glMatrixMode(GL_MODELVIEW);
  glLoadIdentity();

  {
    // convert opengl coordinates into the document information coordinates
    glPushMatrix();
    glMultMatrixf((GLfloat*)m_convert);

    // apply view offset
    openMVG::Mat4 offset_w = l2w_Camera(Mat3::Identity(), Vec3(x_offset,y_offset,z_offset));
    glMultMatrixd((GLdouble*)offset_w.data());

    // then apply current camera transformation
    const View * view = sfm_data.GetViews().at(vec_cameras[current_cam]).get();
    const Pose3 pose = sfm_data.GetPoseOrDie(view);
    const openMVG::Mat4 l2w = l2w_Camera(pose.rotation(), pose.translation());

    glPushMatrix();
    glMultMatrixd((GLdouble*)l2w.data());

    glPointSize(3);
    glDisable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);

    //Draw Structure in GREEN (as seen from the current camera)
    size_t nbPoint = sfm_data.GetLandmarks().size();
    size_t cpt = 0;
    glBegin(GL_POINTS);
    glColor3f(0.f,1.f,0.f);
    for (Landmarks::const_iterator iter = sfm_data.GetLandmarks().begin();
      iter != sfm_data.GetLandmarks().end(); ++iter)
    {
      const Landmark & landmark = iter->second;
      glVertex3d(landmark.X(0), landmark.X(1), landmark.X(2));
    }
    glEnd();

    glDisable(GL_CULL_FACE);

    for (int i_cam=0; i_cam < vec_cameras.size(); ++i_cam)
    {
      const View * view = sfm_data.GetViews().at(vec_cameras[i_cam]).get();
      const Pose3 pose = sfm_data.GetPoseOrDie(view);
      const IntrinsicBase * cam = sfm_data.GetIntrinsics().at(view->id_intrinsic).get();
      if (isPinhole(cam->getType()))
      {
        const Pinhole_Intrinsic * camPinhole = dynamic_cast<const Pinhole_Intrinsic*>(cam);

        // Move the frame to camera i_cam by applying its inverse (camera-to-world)
        // transformation. openMVG stores the world-to-camera pose (t = -R*C), so
        // the camera centre is recovered as C = -R^T * t; trans below is thus -C.
        const Vec3 trans = pose.rotation().transpose() * pose.translation();

        // compute inverse transformation matrix from local to world
        const openMVG::Mat4 l2w_i = l2w_Camera(pose.rotation().transpose(), -trans);
        // stack it and use it
        glPushMatrix();
        glMultMatrixd((GLdouble*)l2w_i.data());

        // 1. Draw optical center (RED) and image center (BLUE)
        glPointSize(3);
        glDisable(GL_TEXTURE_2D);
        glDisable(GL_LIGHTING);

        glBegin(GL_POINTS);
        glColor3f(1.f,0.f,0.f);
        glVertex3f(0, 0, 0); // optical center
        glColor3f(0.f,0.f,1.f);
        glVertex3f(0, 0, normalized_focal); // image center
        glEnd();

        // compute image corner coordinates with normalized focal (f = normalized_focal)
        const int w = camPinhole->w();
        const int h = camPinhole->h();

        const double focal = camPinhole->focal();
        // use principal point to adjust image center
        const Vec2 pp = camPinhole->principal_point();

        Vec3 c1(    -pp[0]/focal * normalized_focal, (-pp[1]+h)/focal * normalized_focal, normalized_focal);
        Vec3 c2((-pp[0]+w)/focal * normalized_focal, (-pp[1]+h)/focal * normalized_focal, normalized_focal);
        Vec3 c3((-pp[0]+w)/focal * normalized_focal,     -pp[1]/focal * normalized_focal, normalized_focal);
        Vec3 c4(    -pp[0]/focal * normalized_focal,     -pp[1]/focal * normalized_focal, normalized_focal);

        // 2. Draw thumbnail
        if (i_cam == current_cam)
        {
          glEnable(GL_TEXTURE_2D);
          glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
          glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

          glBindTexture(GL_TEXTURE_2D, m_image_vector[i_cam].texture);

          glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
          glEnable(GL_BLEND);
          glDisable(GL_DEPTH_TEST);

          // The enclosing check already guarantees this is the current camera.
          glColor4f(0.5f,0.5f,0.5f, 0.7f);

          glBegin(GL_QUADS);
          glTexCoord2d(0.0,1.0);    glVertex3d(c1[0], c1[1], c1[2]);
          glTexCoord2d(1.0,1.0);    glVertex3d(c2[0], c2[1], c2[2]);
          glTexCoord2d(1.0,0.0);    glVertex3d(c3[0], c3[1], c3[2]);
          glTexCoord2d(0.0,0.0);    glVertex3d(c4[0], c4[1], c4[2]);
          glEnd();

          glDisable(GL_TEXTURE_2D);
          glDisable(GL_BLEND); glEnable(GL_DEPTH_TEST);
        }

       // 3. Draw camera cone
        if (i_cam == current_cam) {
          glColor3f(1.f,1.f,0.f);
        } else {
          glColor3f(1.f,0.f,0.f);
        }
        glBegin(GL_LINES);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c1[0], c1[1], c1[2]);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c2[0], c2[1], c2[2]);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c3[0], c3[1], c3[2]);
        glVertex3d(0.0,0.0,0.0); glVertex3d(c4[0], c4[1], c4[2]);
        glVertex3d(c1[0], c1[1], c1[2]); glVertex3d(c2[0], c2[1], c2[2]);
        glVertex3d(c2[0], c2[1], c2[2]); glVertex3d(c3[0], c3[1], c3[2]);
        glVertex3d(c3[0], c3[1], c3[2]); glVertex3d(c4[0], c4[1], c4[2]);
        glVertex3d(c4[0], c4[1], c4[2]); glVertex3d(c1[0], c1[1], c1[2]);
        glEnd();

        glPopMatrix(); // go back to current camera frame
      }
    }
    glPopMatrix(); // go back to (document +offset) frame
    glPopMatrix(); // go back to identity
  }
}
bool CreateImageFile( const SfM_Data & sfm_data,
                      const std::string & sImagesFilename)
{
 /* images.txt
      # Image list with two lines of data per image:
      #   IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
      #   POINTS2D[] as (X, Y, POINT3D_ID)
      # Number of images: X, mean observations per image: Y
  */

  // Header
  std::ofstream images_file( sImagesFilename );

  if ( ! images_file )
  {
    std::cerr << "Cannot write file" << sImagesFilename << std::endl;
    return false;
  }
  images_file << "# Image list with two lines of data per image:\n";
  images_file << "#   IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n";
  images_file << "#   POINTS2D[] as (X, Y, POINT3D_ID)\n";
  images_file << "# Number of images: X, mean observations per image: Y\n";

  std::map< IndexT, std::vector< std::tuple<double, double, IndexT> > > viewIdToPoints2D;
  const Landmarks & landmarks = sfm_data.GetLandmarks();
  {
    for ( Landmarks::const_iterator iterLandmarks = landmarks.begin();
          iterLandmarks != landmarks.end(); ++iterLandmarks)
    {
      const IndexT point3d_id = iterLandmarks->first;

      // Tally set of feature observations
      const Observations & obs = iterLandmarks->second.obs;
      for ( Observations::const_iterator itObs = obs.begin(); itObs != obs.end(); ++itObs )
      {
        const IndexT currentViewId = itObs->first;
        const Observation & ob = itObs->second;
        viewIdToPoints2D[currentViewId].push_back(std::make_tuple(ob.x( 0 ), ob.x( 1 ), point3d_id));
      }
    }
  }

  {
    C_Progress_display my_progress_bar( sfm_data.GetViews().size(), std::cout, "\n- CREATE IMAGE FILE -\n" );

    for (Views::const_iterator iter = sfm_data.GetViews().begin();
         iter != sfm_data.GetViews().end(); ++iter, ++my_progress_bar)
    {
      const View * view = iter->second.get();

      if ( !sfm_data.IsPoseAndIntrinsicDefined( view ) )
      {
        continue;
      }

      const Pose3 pose = sfm_data.GetPoseOrDie( view );
      const Mat3 rotation = pose.rotation();
      const Vec3 translation = pose.translation();

      const double Tx = translation[0];
      const double Ty = translation[1];
      const double Tz = translation[2];
      Eigen::Quaterniond q( rotation );
      const double Qx = q.x();
      const double Qy = q.y();
      const double Qz = q.z();
      const double Qw = q.w();

      const IndexT image_id = view->id_view;
      // Colmap's camera_ids correspond to openMVG's intrinsic ids
      const IndexT camera_id = view->id_intrinsic;                           
      const std::string image_name = view->s_Img_path;

      // first line per image
      //IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
      images_file << image_id << " "
         << Qw << " "
         << Qx << " "
         << Qy << " "
         << Qz << " "
         << Tx << " "
         << Ty << " "
         << Tz << " "
         << camera_id << " "
         << image_name << " "     
         << "\n";

      // second line per image 
      //POINTS2D[] as (X, Y, POINT3D_ID)
      for (auto point2D: viewIdToPoints2D[image_id]) 
      {
        images_file << std::get<0>(point2D) << " " << 
        std::get<1>(point2D) << " " <<
        std::get<2>(point2D) << " ";
      }
      images_file << "\n";
    }
  }
  return true;
}
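Colmap's images.txt expects the world-to-camera convention (x_cam = R * x_world + t), which is what pose.rotation() and pose.translation() provide above; a small sketch of the assumed relation to the camera centre (ColmapTranslation_sketch is hypothetical, and pose.center() is assumed to return the centre in world coordinates):

// Sketch: the exported translation equals -R * C for camera centre C.
inline Vec3 ColmapTranslation_sketch(const Pose3 & pose)
{
  const Mat3 R = pose.rotation();
  const Vec3 C = pose.center();   // camera centre in world coordinates (assumed accessor)
  return -R * C;                  // should equal pose.translation()
}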
int main(int argc, char **argv)
{
  CmdLine cmd;

  std::string sSfM_Data_Filename;
  std::string sOutDir = "";

  cmd.add( make_option('i', sSfM_Data_Filename, "sfmdata") );
  cmd.add( make_option('o', sOutDir, "outdir") );

  try {
      if (argc == 1) throw std::string("Invalid command line parameter.");
      cmd.process(argc, argv);
  } catch(const std::string& s) {
      std::cerr << "Usage: " << argv[0] << '\n'
      << "[-i|--sfmdata] filename, the SfM_Data file to convert\n"
      << "[-o|--outdir] path.\n"
      << std::endl;

      std::cerr << s << std::endl;
      return EXIT_FAILURE;
  }

  std::cout << " You called : " <<std::endl
            << argv[0] << std::endl
            << "--sfmdata " << sSfM_Data_Filename << std::endl
            << "--outdir " << sOutDir << std::endl;

  bool bOneHaveDisto = false;
  
  // Create output dir
  if (!stlplus::folder_exists(sOutDir))
    stlplus::folder_create( sOutDir );

  // Read the SfM scene
  SfM_Data sfm_data;
  if (!Load(sfm_data, sSfM_Data_Filename, ESfM_Data(VIEWS|INTRINSICS|EXTRINSICS))) {
    std::cerr << std::endl
      << "The input SfM_Data file \""<< sSfM_Data_Filename << "\" cannot be read." << std::endl;
    return EXIT_FAILURE;
  }

  for(Views::const_iterator iter = sfm_data.GetViews().begin();
      iter != sfm_data.GetViews().end(); ++iter)
  {
    const View * view = iter->second.get();
    if (!sfm_data.IsPoseAndIntrinsicDefined(view))
        continue;
    
    // Valid view, we can ask a pose & intrinsic data
    const Pose3 pose = sfm_data.GetPoseOrDie(view);
    Intrinsics::const_iterator iterIntrinsic = sfm_data.GetIntrinsics().find(view->id_intrinsic);
    const IntrinsicBase * cam = iterIntrinsic->second.get();
    
    if (!cameras::isPinhole(cam->getType()))
        continue;
    const Pinhole_Intrinsic * pinhole_cam = static_cast<const Pinhole_Intrinsic *>(cam);
    
    // Extrinsic
    const Vec3 t = pose.translation();
    const Mat3 R = pose.rotation();
    // Intrinsic
    const double f = pinhole_cam->focal();
    const Vec2 pp = pinhole_cam->principal_point();

    // Image size in px
    const int w = pinhole_cam->w();
    const int h = pinhole_cam->h();
    
    // We can now create the .cam file for the View in the output dir 
    std::ofstream outfile( stlplus::create_filespec(
                sOutDir, stlplus::basename_part(view->s_Img_path), "cam" ).c_str() );
    // See https://github.com/nmoehrle/mvs-texturing/blob/master/Arguments.cpp
    // for full specs
    const int largerDim = w > h ? w : h;
    outfile << t(0) << " " << t(1) << " " << t(2) << " "
        << R(0,0) << " " << R(0,1) << " " << R(0,2) << " "
        << R(1,0) << " " << R(1,1) << " " << R(1,2) << " "
        << R(2,0) << " " << R(2,1) << " " << R(2,2) << "\n"
        << f / largerDim << " 0 0 1 " << pp(0) / w << " " << pp(1) / h;
    outfile.close();
    
    if(cam->have_disto())
      bOneHaveDisto = true;
  }
  
  const string sUndistMsg = bOneHaveDisto ? "undistorted " : "";
  const string sQuitMsg = std::string("Your SfM_Data file was successfully converted!\n") +
    "Now you can copy your " + sUndistMsg + "images in the \"" + sOutDir + "\" directory and run MVS Texturing";
  std::cout << sQuitMsg << std::endl;
  return EXIT_SUCCESS;
}