Code Example #1
File: MapMakerServerBase.cpp  Project: wavelab/mcptam
//  Adds map points at a given initial depth in a specified pyramid level
void MapMakerServerBase::AddInitDepthMapPoints(MultiKeyFrame& mkfSrc, int nLevel, int nLimit, double dInitDepth)
{
  for (KeyFramePtrMap::iterator it = mkfSrc.mmpKeyFrames.begin(); it != mkfSrc.mmpKeyFrames.end(); ++it)
  {
    KeyFrame& kfSrc = *(it->second);
    TaylorCamera& cameraSrc = mmCameraModels[kfSrc.mCamName];
    Level& level = kfSrc.maLevels[nLevel];
    ThinCandidates(kfSrc, nLevel);

    int nLevelScale = LevelScale(nLevel);

    std::cout << "AddInitDepthMapPoints, processing " << level.vCandidates.size() << " candidates" << std::endl;

    for (unsigned int i = 0; i < level.vCandidates.size() && static_cast<int>(i) < nLimit; ++i)
    {
      TooN::Vector<2> v2RootPos = LevelZeroPos(level.vCandidates[i].irLevelPos, nLevel);
      TooN::Vector<3> v3UnProj = cameraSrc.UnProject(v2RootPos) *
                           dInitDepth;  // This unprojects the noisy image location to a depth of dInitDepth

      MapPoint* pPointNew = new MapPoint;
      pPointNew->mv3WorldPos = kfSrc.mse3CamFromWorld.inverse() * v3UnProj;
      pPointNew->mpPatchSourceKF = &kfSrc;
      pPointNew->mbFixed = false;
      pPointNew->mnSourceLevel = nLevel;
      pPointNew->mv3Normal_NC = TooN::makeVector(0, 0, -1);
      pPointNew->mirCenter = level.vCandidates[i].irLevelPos;
      pPointNew->mv3Center_NC = cameraSrc.UnProject(v2RootPos);
      pPointNew->mv3OneRightFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(nLevelScale, 0)));
      pPointNew->mv3OneDownFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(0, nLevelScale)));

      normalize(pPointNew->mv3Center_NC);
      normalize(pPointNew->mv3OneDownFromCenter_NC);
      normalize(pPointNew->mv3OneRightFromCenter_NC);

      pPointNew->RefreshPixelVectors();

      mMap.mlpPoints.push_back(pPointNew);

      Measurement* pMeas = new Measurement;
      pMeas->v2RootPos = v2RootPos;
      pMeas->nLevel = nLevel;
      pMeas->eSource = Measurement::SRC_ROOT;

      kfSrc.AddMeasurement(pPointNew, pMeas);
      // kfSrc.mmpMeasurements[pPointNew] = pMeas;
      // pPointNew->mMMData.spMeasurementKFs.insert(&kfSrc);
    }
  }
}
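
For context, here is a minimal usage sketch. The class name MyMapMaker, the per-level candidate limit of 100 and the initial depth of 3.0 world units are illustrative assumptions, not mcptam code; the sketch only shows how AddInitDepthMapPoints might be driven for a newly added MultiKeyFrame from inside the map-maker class hierarchy (LEVELS is the project's pyramid-level count).

// Hypothetical illustration (not part of mcptam): seed a newly added MultiKeyFrame
// with map points from inside a class derived from MapMakerServerBase.
class MyMapMaker : public MapMakerServerBase
{
public:
  void SeedNewMultiKeyFrame(MultiKeyFrame& mkfNew)
  {
    // Seed every pyramid level with points at a nominal depth of 3.0 (world units),
    // keeping at most 100 thinned candidates per keyframe and level.
    for (int nLevel = 0; nLevel < LEVELS; ++nLevel)
      AddInitDepthMapPoints(mkfNew, nLevel, 100, 3.0);
  }
};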
Code Example #2
File: MapMakerServerBase.cpp  Project: wavelab/mcptam
// Tries to make a new map point out of a single candidate point
// by searching for that point in another keyframe, and triangulating
// if a match is found.
bool MapMakerServerBase::AddPointEpipolar(KeyFrame& kfSrc, KeyFrame& kfTarget, int nLevel, int nCandidate)
{
  // debug
  static GVars3::gvar3<int> gvnCrossCamera("CrossCamera", 1, GVars3::HIDDEN | GVars3::SILENT);
  if (!*gvnCrossCamera && kfSrc.mCamName != kfTarget.mCamName)
    return false;

  TaylorCamera& cameraSrc = mmCameraModels[kfSrc.mCamName];
  TaylorCamera& cameraTarget = mmCameraModels[kfTarget.mCamName];

  int nLevelScale = LevelScale(nLevel);
  Candidate& candidate = kfSrc.maLevels[nLevel].vCandidates[nCandidate];
  CVD::ImageRef irLevelPos = candidate.irLevelPos;
  TooN::Vector<2> v2RootPos = LevelZeroPos(irLevelPos, nLevel);  // The pixel coords of the candidate at level zero

  TooN::Vector<3> v3Ray_SC =
    cameraSrc.UnProject(v2RootPos);  // The pixel coords unprojected into a 3d half-line in the source kf frame
  TooN::Vector<3> v3LineDirn_TC =
    kfTarget.mse3CamFromWorld.get_rotation() * (kfSrc.mse3CamFromWorld.get_rotation().inverse() *
        v3Ray_SC);  // The direction of that line in the target kf frame
  TooN::Vector<3> v3CamCenter_TC =
    kfTarget.mse3CamFromWorld *
    kfSrc.mse3CamFromWorld.inverse().get_translation();  // The position of the source kf in the target kf frame
  TooN::Vector<3> v3CamCenter_SC =
    kfSrc.mse3CamFromWorld *
    kfTarget.mse3CamFromWorld.inverse().get_translation();  // The position of the target kf in the source kf frame

  double dMaxEpiAngle = M_PI / 3;  // the maximum angle spanned by two view rays allowed
  double dMinEpiAngle = 0.05;      // the minimum angle allowed

  // Want to figure out the min and max depths allowed on the source ray, which will be determined by the minimum and
  // maximum allowed epipolar angle
  // See diagram below, which shows the min and max epipolar angles.
  /*
   *              /\
   *             / m\
   *            /  i \
   *           /`. n  \
   *          / m `.   \
   *         /  a   `.  \
   *        /   x     `. \
   *       /____________`.\
   *    Source           Target
   */
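  // Law of sines in the triangle (source camera, point, target camera): the depth along
  // the source ray is dSeparationDist * sin(targetAngle) / sin(epipolarAngle), with
  // targetAngle = pi - sourceAngle - epipolarAngle. Evaluating this at the maximum and
  // minimum allowed epipolar angles gives the start and end depths computed below.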

  double dSeparationDist = norm(v3CamCenter_SC);
  double dSourceAngle =
    acos((v3CamCenter_SC * v3Ray_SC) / dSeparationDist);  // v3Ray_SC is unit length so don't have to divide

  double dMinTargetAngle = M_PI - dSourceAngle - dMaxEpiAngle;
  double dStartDepth = dSeparationDist * sin(dMinTargetAngle) / sin(dMaxEpiAngle);

  double dMaxTargetAngle = M_PI - dSourceAngle - dMinEpiAngle;
  double dEndDepth = dSeparationDist * sin(dMaxTargetAngle) / sin(dMinEpiAngle);

  if (dStartDepth < 0.2)  // don't bother looking too close
    dStartDepth = 0.2;

  ROS_DEBUG_STREAM("dStartDepth: " << dStartDepth << " dEndDepth: " << dEndDepth);

  TooN::Vector<3> v3RayStart_TC =
    v3CamCenter_TC + dStartDepth * v3LineDirn_TC;  // The start of the epipolar line segment in the target kf frame
  TooN::Vector<3> v3RayEnd_TC =
    v3CamCenter_TC + dEndDepth * v3LineDirn_TC;  // The end of the epipolar line segment in the target kf frame

  // Project epipolar line segment start and end points onto unit sphere and check for minimum distance between them
  TooN::Vector<3> v3A = v3RayStart_TC;
  normalize(v3A);
  TooN::Vector<3> v3B = v3RayEnd_TC;
  normalize(v3B);
  TooN::Vector<3> v3BetweenEndpoints = v3A - v3B;

  if (v3BetweenEndpoints * v3BetweenEndpoints < 0.00000001)
  {
    ROS_DEBUG_STREAM("MapMakerServerBase: v3BetweenEndpoints too small.");
    return false;
  }

  // Now we want to construct a bunch of hypothetical point locations, so we can warp the source patch
  // into the target KF and look for a match. To do this, we need to partition the epipolar arc in the target
  // KF equally, rather than the source ray equally. The epipolar arc lies at the intersection of the epipolar
  // plane and the unit sphere of the target KF. We will construct a matrix that transforms 3-vectors into
  // the epipolar plane's coordinate frame, and use it to define the start and stop coordinates on a unit circle by
  // transforming the ray start and ray end vectors. Then it's just a matter of partitioning the unit circle, and
  // projecting each partition point onto the source ray (keeping in mind that the source ray lies in the
  // epipolar plane too).

  // Find the normal of the epipolar plane
  TooN::Vector<3> v3PlaneNormal = v3A ^ v3B;
  normalize(v3PlaneNormal);
  TooN::Vector<3> v3PlaneI =
    v3A;  // Let's call the vector we got from the start of the epipolar line segment the new coordinate frame's x axis
  TooN::Vector<3> v3PlaneJ = v3PlaneNormal ^ v3PlaneI;  // Get the y axis

  // This will convert a 3D point to the epipolar plane's coordinate frame
  TooN::Matrix<3> m3ToPlaneCoords;
  m3ToPlaneCoords[0] = v3PlaneI;
  m3ToPlaneCoords[1] = v3PlaneJ;
  m3ToPlaneCoords[2] = v3PlaneNormal;
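  // The rows of m3ToPlaneCoords are the orthonormal basis {I, J, normal}, so multiplying a
  // vector by this matrix expresses it in the epipolar plane's frame. Vectors lying in the
  // plane have a (numerically) zero third coordinate, which is why only the first two
  // components are kept below.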

  TooN::Vector<2> v2PlaneB = (m3ToPlaneCoords * v3B).slice<0, 2>();  // The vector we got from the end of the epipolar
  // line segment, in epipolar plane coordinates
  TooN::Vector<2> v2PlaneI = TooN::makeVector(1, 0);

  double dMaxAngleAlongCircle =
    acos(v2PlaneB * v2PlaneI);  // The angle between point B (now a 2D point in the plane) and the x axis

  // For stepping along the circle
  double dAngleStep = cameraTarget.OnePixelAngle() * LevelScale(nLevel) * 3;
  int nSteps = ceil(dMaxAngleAlongCircle / dAngleStep);
  dAngleStep = dMaxAngleAlongCircle / nSteps;

  TooN::Vector<2> v2RayStartInPlane = (m3ToPlaneCoords * v3RayStart_TC).slice<0, 2>();
  TooN::Vector<2> v2RayEndInPlane = (m3ToPlaneCoords * v3RayEnd_TC).slice<0, 2>();
  TooN::Vector<2> v2RayDirInPlane = v2RayEndInPlane - v2RayStartInPlane;
  normalize(v2RayDirInPlane);

  // First in world frame, second in camera frame
  std::vector<std::pair<TooN::Vector<3>, TooN::Vector<3>>> vMapPointPositions;
  TooN::SE3<> se3WorldFromTargetCam = kfTarget.mse3CamFromWorld.inverse();
  for (int i = 0; i < nSteps + 1; ++i)  // stepping along circle
  {
    double dAngle = i * dAngleStep;                                  // current angle
    TooN::Vector<2> v2CirclePoint = TooN::makeVector(cos(dAngle), sin(dAngle));  // point on circle

    // Backproject onto view ray, this is the depth along view ray where we intersect
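    // Solve for dAlpha such that v2RayStartInPlane + dAlpha * v2RayDirInPlane is parallel to
    // v2CirclePoint, i.e. their 2D cross product is zero; rearranging gives the ratio below.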
    double dAlpha = (v2RayStartInPlane[0] * v2CirclePoint[1] - v2RayStartInPlane[1] * v2CirclePoint[0]) /
                    (v2RayDirInPlane[1] * v2CirclePoint[0] - v2RayDirInPlane[0] * v2CirclePoint[1]);

    TooN::Vector<3> v3PointPos_TC = v3RayStart_TC + dAlpha * v3LineDirn_TC;
    TooN::Vector<3> v3PointPos = se3WorldFromTargetCam * v3PointPos_TC;
    vMapPointPositions.push_back(std::make_pair(v3PointPos, v3PointPos_TC));
  }

  // This will be the map point that we place at the different depths in order to generate warped patches
  MapPoint point;
  point.mpPatchSourceKF = &kfSrc;
  point.mnSourceLevel = nLevel;
  point.mv3Normal_NC = TooN::makeVector(0, 0, -1);
  point.mirCenter = irLevelPos;
  point.mv3Center_NC = cameraSrc.UnProject(v2RootPos);
  point.mv3OneRightFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(nLevelScale, 0)));
  point.mv3OneDownFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(0, nLevelScale)));

  normalize(point.mv3Center_NC);
  normalize(point.mv3OneRightFromCenter_NC);
  normalize(point.mv3OneDownFromCenter_NC);

  PatchFinder finder;
  int nMaxZMSSD = finder.mnMaxSSD + 1;
  int nBestZMSSD = nMaxZMSSD;
  int nBest = -1;
  TooN::Vector<2> v2BestMatch = TooN::Zeros;

  std::vector<std::tuple<int, int, TooN::Vector<2>>> vScoresIndicesBestMatches;

  for (unsigned i = 0; i < vMapPointPositions.size(); ++i)  // go through all our hypothesized map points
  {
    point.mv3WorldPos = vMapPointPositions[i].first;
    point.RefreshPixelVectors();

    TooN::Vector<2> v2Image = cameraTarget.Project(vMapPointPositions[i].second);

    if (cameraTarget.Invalid())
      continue;

    if (!kfTarget.maLevels[0].image.in_image(CVD::ir(v2Image)))
      continue;

    // Check if projected point is in a masked portion of the target keyframe
    if (kfTarget.maLevels[0].mask.totalsize() > 0 && kfTarget.maLevels[0].mask[CVD::ir(v2Image)] == 0)
      continue;

    TooN::Matrix<2> m2CamDerivs = cameraTarget.GetProjectionDerivs();

    int nSearchLevel = finder.CalcSearchLevelAndWarpMatrix(point, kfTarget.mse3CamFromWorld, m2CamDerivs);
    if (nSearchLevel == -1)
      continue;

    finder.MakeTemplateCoarseCont(point);

    if (finder.TemplateBad())
      continue;

    int nScore;
    bool bExhaustive =
      false;  // Should we do an exhaustive search of the target area? Should maybe make this into a param
    bool bFound = finder.FindPatchCoarse(CVD::ir(v2Image), kfTarget, 3, nScore, bExhaustive);

    if (!bFound)
      continue;

    vScoresIndicesBestMatches.push_back(std::make_tuple(nScore, i, finder.GetCoarsePosAsVector()));

    if (nScore < nBestZMSSD)
    {
      nBestZMSSD = nScore;
      nBest = i;
      v2BestMatch = finder.GetCoarsePosAsVector();
    }
  }

  if (nBest == -1)
  {
    ROS_DEBUG_STREAM("No match found.");
    return false;
  }

  std::sort(vScoresIndicesBestMatches.begin(), vScoresIndicesBestMatches.end(), compScores);

  // We want matches that are unambiguous, so if there are many good matches along the view ray,
  // we can't say for certain where the best one really is. Therefore, implement the following rule:
  // the best ZMSSD has to be 10% better than the nearest other, unless that nearest other is 1 index away
  // from the best

  int nResizeTo = 1;
  for (unsigned i = 1; i < vScoresIndicesBestMatches.size(); ++i)
  {
    if (std::get<0>(vScoresIndicesBestMatches[i]) > nBestZMSSD * 0.9)  // within 10% of best
      nResizeTo++;
  }

  // Too many candidates score close to the best; the best is allowed to be within 10% of at most two other points.
  // We can't be certain which one is correct, so get out of here
  if (nResizeTo > 3)
    return false;

  vScoresIndicesBestMatches.resize(nResizeTo);  // chop!

  // All the points left in vScoresIndicesBestMatches should be within 1 idx of best, otherwise our matches are ambiguous
  // Test index distance:
  for (unsigned i = 1; i < vScoresIndicesBestMatches.size(); ++i)
  {
    if (abs(std::get<1>(vScoresIndicesBestMatches[i]) - nBest) > 1)  // bad, index too far away, get out of here
      return false;
  }

  // Now all the points in vScoresIndicesBestMatches can be considered potential matches

  TooN::Vector<2> v2SubPixPos = TooN::makeVector(-1, -1);
  bool bGotGoodSubpix = false;
  for (unsigned i = 0; i < vScoresIndicesBestMatches.size(); ++i)  // go through all potential good matches
  {
    int nCurrBest = std::get<1>(vScoresIndicesBestMatches[i]);
    TooN::Vector<2> v2CurrBestMatch = std::get<2>(vScoresIndicesBestMatches[i]);

    point.mv3WorldPos = vMapPointPositions[nCurrBest].first;
    point.RefreshPixelVectors();

    cameraTarget.Project(vMapPointPositions[nCurrBest].second);
    TooN::Matrix<2> m2CamDerivs = cameraTarget.GetProjectionDerivs();

    finder.CalcSearchLevelAndWarpMatrix(point, kfTarget.mse3CamFromWorld, m2CamDerivs);
    finder.MakeTemplateCoarseCont(point);
    finder.SetSubPixPos(v2CurrBestMatch);

    // Try to get subpixel convergence
    bool bSubPixConverges = finder.IterateSubPixToConvergence(kfTarget, 10);

    if (!bSubPixConverges)
      continue;

    // First one to make it here wins. Keep in mind that vScoresIndicesBestMatches is ordered by
    // score, so we're trying the points in descending order of potential (best score first)
    bGotGoodSubpix = true;
    v2SubPixPos = finder.GetSubPixPos();
    break;
  }

  // None of the candidates had subpix converge? Bad match...
  if (!bGotGoodSubpix)
    return false;

  // Now triangulate the 3d point...
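  // ReprojectPoint triangulates from the source-from-target relative pose and the two
  // unprojected view rays, giving the point in the target camera frame; the inverse
  // target pose then maps it into the world frame.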
  TooN::Vector<3> v3New;
  v3New = kfTarget.mse3CamFromWorld.inverse() *
          ReprojectPoint(kfSrc.mse3CamFromWorld * kfTarget.mse3CamFromWorld.inverse(), cameraSrc.UnProject(v2RootPos),
                         cameraTarget.UnProject(v2SubPixPos));

  MapPoint* pPointNew = new MapPoint;

  pPointNew->mv3WorldPos = v3New;

  // Patch source stuff:
  pPointNew->mpPatchSourceKF = &kfSrc;
  pPointNew->mnSourceLevel = nLevel;
  pPointNew->mv3Normal_NC = TooN::makeVector(0, 0, -1);
  pPointNew->mirCenter = irLevelPos;
  pPointNew->mv3Center_NC = cameraSrc.UnProject(v2RootPos);
  pPointNew->mv3OneRightFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(nLevelScale, 0)));
  pPointNew->mv3OneDownFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(0, nLevelScale)));

  normalize(pPointNew->mv3Center_NC);
  normalize(pPointNew->mv3OneDownFromCenter_NC);
  normalize(pPointNew->mv3OneRightFromCenter_NC);

  pPointNew->RefreshPixelVectors();

  Measurement* pMeasSrc = new Measurement;
  pMeasSrc->eSource = Measurement::SRC_ROOT;
  pMeasSrc->v2RootPos = v2RootPos;
  pMeasSrc->nLevel = nLevel;
  pMeasSrc->bSubPix = true;

  Measurement* pMeasTarget = new Measurement;
  *pMeasTarget = *pMeasSrc;  // copy data
  pMeasTarget->eSource = Measurement::SRC_EPIPOLAR;
  pMeasTarget->v2RootPos = v2SubPixPos;

  // Record map point and its measurement in the right places
  kfSrc.AddMeasurement(pPointNew, pMeasSrc);
  kfTarget.AddMeasurement(pPointNew, pMeasTarget);

  // kfSrc.mmpMeasurements[pPointNew] = pMeasSrc;
  // kfTarget.mmpMeasurements[pPointNew] = pMeasTarget;
  // pPointNew->mMMData.spMeasurementKFs.insert(&kfSrc);
  // pPointNew->mMMData.spMeasurementKFs.insert(&kfTarget);

  mMap.mlpPoints.push_back(pPointNew);
  mlpNewQueue.push_back(pPointNew);

  return true;
}
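
The comparator compScores passed to std::sort above is defined elsewhere in MapMakerServerBase.cpp and is not shown in this excerpt. A minimal sketch consistent with how it is used here, ordering the (score, index, position) tuples by ascending ZMSSD score so the best match comes first, could look like this:

// Sketch of a score comparator (assumed form; the real compScores lives elsewhere in the file):
// order candidate matches so that the lowest ZMSSD score, i.e. the best match, comes first.
static bool compScores(const std::tuple<int, int, TooN::Vector<2>>& lhs,
                       const std::tuple<int, int, TooN::Vector<2>>& rhs)
{
  return std::get<0>(lhs) < std::get<0>(rhs);
}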
Code Example #3
File: MapMakerCalib.cpp  Project: wavelab/mcptam
// Initialize the map
bool MapMakerCalib::InitFromCalibImage(CalibImageTaylor& calibImage, double dSquareSize, std::string cameraName,
                                       TooN::SE3<>& se3TrackerPose)
{
    // Create a new MKF
    MultiKeyFrame* pMKF = new MultiKeyFrame;

    pMKF->mse3BaseFromWorld = calibImage.mse3CamFromWorld;     // set mkf pose to be pose from tracker
    pMKF->mse3BaseFromWorld.get_translation() *= dSquareSize;  // scale it
    pMKF->mbFixed = false;

    // Create a new KF (there will only be one for now)
    KeyFrame* pKF = new KeyFrame(pMKF, cameraName);
    pMKF->mmpKeyFrames[cameraName] = pKF;

    pKF->mse3CamFromWorld = pMKF->mse3BaseFromWorld;  // same as parent MKF
    pKF->mse3CamFromBase = TooN::SE3<>();                   // set relative pose to identity
    pKF->mbActive = true;

    pKF->MakeKeyFrame_Lite(calibImage.mImage, true);
    pKF->MakeKeyFrame_Rest();

    // Create MapPoints where the calib image corners are
    // Testing to see if we should create points at all levels or just level 0
    for (int l = 0; l < LEVELS; ++l)
    {
        if (l >= 1)
            break;

        int nLevelScale = LevelScale(l);

        for (unsigned i = 0; i < calibImage.mvGridCorners.size(); ++i)
        {
            MapPoint* pNewPoint = new MapPoint;
            pNewPoint->mv3WorldPos.slice<0, 2>() = dSquareSize * CVD::vec(calibImage.mvGridCorners[i].mirGridPos);
            pNewPoint->mv3WorldPos[2] = 0.0;  // on z=0 plane
            pNewPoint->mbFixed = true;        // the calibration pattern is fixed
            pNewPoint->mbOptimized = true;    // since it won't move it's already in its optimal location

            // Patch source stuff:
            pNewPoint->mpPatchSourceKF = pKF;
            pNewPoint->mnSourceLevel = l;
            pNewPoint->mv3Normal_NC = TooN::makeVector(0, 0, -1);

            TooN::Vector<2> v2RootPos = calibImage.mvGridCorners[i].mParams.v2Pos;

            // Same code as in MapMakerServerBase::AddPointEpipolar
            pNewPoint->mirCenter = CVD::ir_rounded(LevelNPos(v2RootPos, l));
            pNewPoint->mv3Center_NC = mmCameraModels[cameraName].UnProject(v2RootPos);
            pNewPoint->mv3OneRightFromCenter_NC =
                mmCameraModels[cameraName].UnProject(v2RootPos + CVD::vec(CVD::ImageRef(nLevelScale, 0)));
            pNewPoint->mv3OneDownFromCenter_NC =
                mmCameraModels[cameraName].UnProject(v2RootPos + CVD::vec(CVD::ImageRef(0, nLevelScale)));

            normalize(pNewPoint->mv3Center_NC);
            normalize(pNewPoint->mv3OneDownFromCenter_NC);
            normalize(pNewPoint->mv3OneRightFromCenter_NC);

            pNewPoint->RefreshPixelVectors();

            mMap.mlpPoints.push_back(pNewPoint);

            // Create a measurement of the point
            Measurement* pMeas = new Measurement;
            pMeas->eSource = Measurement::SRC_ROOT;
            pMeas->v2RootPos = v2RootPos;
            pMeas->nLevel = l;
            pMeas->bSubPix = true;
            pKF->mmpMeasurements[pNewPoint] = pMeas;

            pNewPoint->mMMData.spMeasurementKFs.insert(pKF);
        }
    }

    mMap.mlpMultiKeyFrames.push_back(pMKF);

    /*
    {
      ROS_DEBUG("After creating points, before optimization: ");
      int i=0;
      double dErrorSum = 0;
      for(MapPointPtrList::iterator it = mMap.mlpPoints.begin(); it != mMap.mlpPoints.end(); ++it, ++i)
      {
        std::cout<<"Point pos: "<<(*it)->mv3WorldPos;
        TooN::Vector<3> v3Cam = pKF->mse3CamFromWorld * (*it)->mv3WorldPos;
        TooN::Vector<2> v2Image = mmCameraModels[cameraName].Project(v3Cam);
        std::cout<<" Reprojected: "<<v2Image<<" Original: "<<calibImage.mvGridCorners[i].mParams.v2Pos;
        std::cout<<std::endl;

        TooN::Vector<2> v2Error = v2Image - calibImage.mvGridCorners[i].mParams.v2Pos;
        dErrorSum += v2Error * v2Error;
      }

      std::cout<<"Error sum: "<<dErrorSum<<" mean: "<<dErrorSum/i<<std::endl;
    }
    */

    mBundleAdjuster.SetNotConverged();

    int nSanityCounter = 0;
    std::vector<std::pair<KeyFrame*, MapPoint*>> vOutliers;

    ROS_DEBUG("MapMakerCalib: Initialized from calib image, running bundle adjuster");

    while (!mBundleAdjuster.ConvergedFull())
    {
        mBundleAdjuster.BundleAdjustAll(vOutliers);
        if (ResetRequested() || nSanityCounter > 5)
        {
            ROS_WARN("Dumping map to map_after.dat");
            DumpToFile("map_after.dat");
            ROS_ERROR("MapMakerCalib: Exceeded sanity counter or reset requested, bailing");
            return false;
        }

        nSanityCounter++;
    }

    // Don't allow any outliers when we're initializing; all of the calibration points turned into MapPoints
    // should have been found
    if (vOutliers.size() > 0)
    {
        ROS_ERROR("MapMakerCalib: Found outliers in initialization, bailing");
        return false;
    }

    /*
    {
      ROS_DEBUG("After optimization: ");
      int i=0;
      double dErrorSum = 0;
      for(MapPointPtrList::iterator it = mMap.mlpPoints.begin(); it != mMap.mlpPoints.end(); ++it, ++i)
      {
        std::cout<<"Point pos: "<<(*it)->mv3WorldPos;
        TooN::Vector<3> v3Cam = pKF->mse3CamFromWorld * (*it)->mv3WorldPos;
        TooN::Vector<2> v2Image = mmCameraModels[cameraName].Project(v3Cam);
        std::cout<<" Reprojected: "<<v2Image<<" Original: "<<calibImage.mvGridCorners[i].mParams.v2Pos;
        std::cout<<std::endl;

        TooN::Vector<2> v2Error = v2Image - calibImage.mvGridCorners[i].mParams.v2Pos;
        dErrorSum += v2Error * v2Error;
      }

      std::cout<<"Error sum: "<<dErrorSum<<" mean: "<<dErrorSum/i<<std::endl;
    }
    */

    // Get the updated pose, tracker will initialize with this
    se3TrackerPose = pKF->mse3CamFromWorld;

    ROS_INFO_STREAM("MapMakerCalib: Made initial map with " << mMap.mlpPoints.size() << " points.");
    ROS_INFO_STREAM("MapMakerCalib: tracker pose: " << se3TrackerPose);

    mState = MM_RUNNING;  // not doing anything special for initialization (unlike MapMaker)
    mMap.mbGood = true;

    return true;
}
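
A minimal calling sketch, assuming a checkerboard square size of 0.04 world units and a camera named "camera1" (both illustrative assumptions, not taken from mcptam), shows how the initial map and the refined pose might be obtained:

// Hypothetical sketch (not mcptam code): build the initial calibration map from one
// checkerboard view and return the refined camera pose, e.g. to seed a tracker.
bool InitCalibrationMap(MapMakerCalib& mapMakerCalib, CalibImageTaylor& calibImage,
                        TooN::SE3<>& se3TrackerPose)
{
  const double dSquareSize = 0.04;  // assumed checkerboard square size in world units
  return mapMakerCalib.InitFromCalibImage(calibImage, dSquareSize, "camera1", se3TrackerPose);
}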