template<class CALIBRATION>
PinholeCamera<CALIBRATION> perturbCameraPoseAndCalibration(
    const PinholeCamera<CALIBRATION>& camera) {
  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
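  // Perturb the pose by composing a fixed rotation/translation offset.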
  Pose3 noise_pose = Pose3(Rot3::ypr(-M_PI / 10, 0., -M_PI / 10),
      Point3(0.5, 0.1, 0.3));
  Pose3 cameraPose = camera.pose();
  Pose3 perturbedCameraPose = cameraPose.compose(noise_pose);
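  // Perturb the calibration along a small random tangent-space direction.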
  typename gtsam::traits<CALIBRATION>::TangentVector d;
  d.setRandom();
  d *= 0.1;
  CALIBRATION perturbedCalibration = camera.calibration().retract(d);
  return PinholeCamera<CALIBRATION>(perturbedCameraPose, perturbedCalibration);
}
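
// A minimal usage sketch (not part of the original source; assumes GTSAM's
// Cal3_S2 calibration and that the template above is visible at the call site):
//
//   PinholeCamera<Cal3_S2> camera(Pose3(), Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0));
//   PinholeCamera<Cal3_S2> noisy = perturbCameraPoseAndCalibration(camera);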
void TODBaseImporter::importCamera(PinholeCamera &camera) const
{
//  string cameraFilename = addFilename ? folder + "/camera.yml" : folder;
  //TODO: move up
  string cameraFilename = baseFolder + "/camera.yml";
  camera.read(cameraFilename);
}
void drawTestScene()
{
    bMap->clear();
    cam.buildRays(*bMap,buffer,lights);
    //cam.buildCentralRay(bMap,&buffer,lights);
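    // Walk the linked list of ray tasks; each task traces one ray into the scene tree.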
    RayLink *t;
    t = buffer.start->next;
    while(buffer.size > 0)
    {
        buffer.size--;
        t->task->execute(sceneTree);
        t = t->next;
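        // NOTE: t has already advanced, so enabling the delete below would free
        // the *next* link's task; tasks are presumably reclaimed by buffer.clear().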
        //delete t->task;

        /*
        t = buffer.getFront();
        t->task->execute(sceneTree);
        buffer.deleteFront();
        */
        countRayExecuted++;
    }
    /*
    unsigned char r, g, b;

    for(int i = 0; i < IMAGE_HEIGHT; i++)
    	for(int j = 0; j < IMAGE_WIDTH; j++)
    	{
    		r = ((i/3)%15 <= 2) ? 0 : 255;
    		g = ((j/3)%15 <= 2) ? 0 : 255;
    		b = ((i/3)%15 < (j/3)%15) ? 0 : 255;
    		bMap->setBMapPixel(i, j, r, g, b);
    	}
    */

    printf("drawn\n");
    buffer.clear();
}
void testTree()
{
    std::vector<Collidable *> *vec = new std::vector<Collidable*>();
    Point *p;
    printf("\ntesting tree \n");

    /*
    p = new Point(-4, -1, -3);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(-4, -1, 4);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(3, 2, -4);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(3, 2, 3);
    p->precomputeBounds();
    vec->push_back(p);
    */


    p = new Point(-5, -5, -5);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(5, -5, -5);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(-5, 5, 5);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(5, 5, 5);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(-1, -1, -1);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(1, -1, -1);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(-1, 1, 1);
    p->precomputeBounds();
    vec->push_back(p);
    p = new Point(1, 1, 1);
    p->precomputeBounds();
    vec->push_back(p);

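    // Build a k-d tree over the eight test points and print its structure.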
    KDTree *tr = new KDTree();
    tr->buildTreeStart(*vec);
    //printf("printing \n");
    tr->printTreeMod();
    printf("tree tested \n");


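    // Smoke test: trace a single central ray through the freshly built tree.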
    bMap->clear();
    cam.buildCentralRay(*bMap,buffer,lights);
    RayLink *t;
    t = buffer.start->next;
    t->task->execute(*tr);

}
int main(int argc, char **argv)
{
  std::system("date");

  if (argc != 4)
  {
    cout << argv[0] << " <modelsPath> <baseFolder> <testObjectName>" << endl;
    return -1;
  }
  string modelsPath = argv[1];
  string baseFolder = argv[2];
  string testObjectName = argv[3];

  //const string modelFilename = "finalModels/" + objectName + ".xml";

  const string testFolder = baseFolder + "/" + testObjectName + "/";

//  const string camerasListFilename = baseFolder + "/cameras.txt";
  const string kinectCameraFilename = baseFolder + "/center.yml";
//  const string visualizationPath = "visualized_results/";
  const string errorsVisualizationPath = "/home/ilysenkov/errors/";
//  const vector<string> objectNames = {"bank", "bucket"};
//  const vector<string> objectNames = {"bank", "bottle", "bucket", "glass", "wineglass"};
  const string registrationMaskFilename = baseFolder + "/registrationMask.png";

  const vector<string> objectNames = {testObjectName};



  TODBaseImporter dataImporter(testFolder);

  PinholeCamera kinectCamera;
  if(!kinectCameraFilename.empty())
  {
    dataImporter.readCameraParams(kinectCameraFilename, kinectCamera, false);
    CV_Assert(kinectCamera.imageSize == Size(640, 480));
  }

  vector<EdgeModel> edgeModels(objectNames.size());
  for (size_t i = 0; i < objectNames.size(); ++i)
  {
    dataImporter.importEdgeModel(modelsPath, objectNames[i], edgeModels[i]);
    cout << "All points in the model: " << edgeModels[i].points.size() << endl;
    cout << "Surface points in the model: " << edgeModels[i].stableEdgels.size() << endl;
    EdgeModel::computeSurfaceEdgelsOrientations(edgeModels[i]);
  }

  vector<EdgeModel> occlusionObjects;
  vector<PoseRT> occlusionOffsets;
  dataImporter.importOcclusionObjects(modelsPath, occlusionObjects, occlusionOffsets);


//#ifdef VISUALIZE_POSE_REFINEMENT
//  edgeModels[0].visualize();
//#endif

  DetectorParams params;
//  params.glassSegmentationParams.closingIterations = 8;
// bucket
//  params.glassSegmentationParams.openingIterations = 8;

  //good clutter
  params.glassSegmentationParams.openingIterations = 15;
  params.glassSegmentationParams.closingIterations = 12;
  params.glassSegmentationParams.finalClosingIterations = 32;
  params.glassSegmentationParams.grabCutErosionsIterations = 4;
  params.planeSegmentationMethod = FIDUCIALS;

  //fixedOnTable
  //params.glassSegmentationParams.finalClosingIterations = 8;

  //clutter
  //bucket
  //params.glassSegmentationParams.finalClosingIterations = 12;

  Detector detector(kinectCamera, params);
//  TransparentDetector detector(kinectCamera);
  for (size_t i = 0; i < edgeModels.size(); ++i)
  {
    detector.addTrainObject(objectNames[i], edgeModels[i]);
  }

  vector<int> testIndices;
  dataImporter.importTestIndices(testIndices);

  Mat registrationMask = imread(registrationMaskFilename, CV_LOAD_IMAGE_GRAYSCALE);
  CV_Assert(!registrationMask.empty());

  vector<size_t> initialPoseCount;
  vector<PoseError> bestPoses, bestInitialPoses;
  int segmentationFailuresCount = 0;
  int badSegmentationCount = 0;

  vector<float> allChamferDistances;
  vector<size_t> geometricHashingPoseCount;
//  vector<int> indicesOfRecognizedObjects;
  vector<double> allRecognitionTimes;
  for(size_t testIdx = 0; testIdx < testIndices.size(); testIdx++)
  {
    srand(42);
    RNG &rng = theRNG();
    rng.state = 0xffffffff;

#if defined(VISUALIZE_POSE_REFINEMENT) && defined(USE_3D_VISUALIZATION)
    boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(new pcl::visualization::PCLVisualizer ("transparent experiments"));
#endif
    int testImageIdx = testIndices[ testIdx ];
    cout << "Test: " << testIdx << " " << testImageIdx << endl;

    Mat kinectDepth, kinectBgrImage;
    if(!kinectCameraFilename.empty())
    {
      dataImporter.importBGRImage(testImageIdx, kinectBgrImage);
      dataImporter.importDepth(testImageIdx, kinectDepth);
    }

/*
    Mat silhouetteImage(480, 640, CV_8UC1, Scalar(0));
    silhouettes[0].draw(silhouetteImage);
    imshow("silhouette", silhouetteImage);
    imshow("mask", centerMask);
    waitKey();



    vector<Point2f> glassContour;
    mask2contour(centerMask, glassContour);
    Mat silhouette2test;
    silhouettes[0].match(Mat(glassContour), silhouette2test);
    exit(-1);
*/

    PoseRT model2test_fiducials, objectOffset;
    dataImporter.importGroundTruth(testImageIdx, model2test_fiducials, false, &objectOffset);
    PoseRT model2test_ground = model2test_fiducials * objectOffset;
//    cout << "Ground truth: " << model2test_ground << endl;

    CV_Assert(edgeModels.size() == 1);
    float occlusionPercentage = computeOcclusionPercentage(kinectCamera, edgeModels[0], objectOffset, occlusionObjects, occlusionOffsets, model2test_fiducials);
    CV_Assert(0.0 <= occlusionPercentage && occlusionPercentage <= 1.0);
    cout << "occlusion percentage: " << occlusionPercentage << endl;

    pcl::PointCloud<pcl::PointXYZ> testPointCloud;
#ifdef USE_3D_VISUALIZATION
    dataImporter.importPointCloud(testImageIdx, testPointCloud);
#endif

#ifdef VISUALIZE_TEST_DATA
    imshow("rgb", kinectBgrImage);
    imshow("depth", kinectDepth * 20);
#endif

#ifdef VISUALIZE_POSE_REFINEMENT
#ifdef USE_3D_VISUALIZATION
    {
      vector<Point3f> cvTestPointCloud;
      pcl2cv(testPointCloud, cvTestPointCloud);
      cout << "test point cloud size: " << cvTestPointCloud.size() << endl;
      publishPoints(cvTestPointCloud, viewer, Scalar(0, 255, 0), "test point cloud");
    }

    publishPoints(edgeModels[0].points, viewer, Scalar(0, 0, 255), "ground object", model2test_ground);
#endif

    if(!kinectCameraFilename.empty())
    {
//      displayEdgels(glassMask, edgeModels[0].points, model2test_ground, kinectCamera, "kinect");
      showEdgels(kinectBgrImage, edgeModels[0].points, model2test_ground, kinectCamera, "ground truth");
      showEdgels(kinectBgrImage, edgeModels[0].stableEdgels, model2test_ground, kinectCamera, "ground truth surface");
    }
    namedWindow("ground truth");
#ifdef USE_3D_VISUALIZATION
    while (!viewer->wasStopped ())
    {
      viewer->spinOnce (100);
      boost::this_thread::sleep (boost::posix_time::microseconds (100000));
    }
    viewer->resetStoppedFlag();
#endif
    waitKey();
    destroyWindow("ground truth");
#endif

    vector<PoseRT> poses_cam;
    vector<float> posesQualities;
    vector<string> detectedObjectsNames;

    TickMeter recognitionTime;
    recognitionTime.start();

    Detector::DebugInfo debugInfo;
    try
    {
      detector.detect(kinectBgrImage, kinectDepth, registrationMask, testPointCloud, poses_cam, posesQualities, detectedObjectsNames, &debugInfo);
    }
    catch(const cv::Exception &)
    {
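      // Swallow detection failures: poses_cam stays empty and this test is
      // counted as a segmentation failure below.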
    }
    recognitionTime.stop();
#ifdef VISUALIZE_POSE_REFINEMENT
    Mat glassMask = debugInfo.glassMask;
    imshow("glassMask", glassMask);
    showSegmentation(kinectBgrImage, glassMask, "segmentation");

    Mat detectionResults = kinectBgrImage.clone();
    detector.visualize(poses_cam, detectedObjectsNames, detectionResults);
    imshow("detection", detectionResults);
    waitKey();
#endif

#ifndef PROFILE

    if (edgeModels.size() == 1)
    {
      vector<Point2f> groundEdgels;
      kinectCamera.projectPoints(edgeModels[0].points, model2test_ground, groundEdgels);

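      // One-sided chamfer distance: for every initial silhouette, average the
      // distance from each silhouette point to its nearest ground-truth edgel.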
      vector<float> chamferDistances;
      for (size_t silhouetteIndex = 0; silhouetteIndex < debugInfo.initialSilhouettes.size(); ++silhouetteIndex)
      {
        vector<Point2f> silhouette = debugInfo.initialSilhouettes[silhouetteIndex];

        double silhouetteDistance = 0.0;
        for (size_t i = 0; i < silhouette.size(); ++i)
        {
          float minDist = std::numeric_limits<float>::max();
          for (size_t j = 0; j < groundEdgels.size(); ++j)
          {
            float currentDist = norm(silhouette[i] - groundEdgels[j]);
            if (currentDist < minDist)
            {
              minDist = currentDist;
            }
          }
          silhouetteDistance += minDist;
        }
        silhouetteDistance /= silhouette.size();
        chamferDistances.push_back(silhouetteDistance);
      }
      std::sort(chamferDistances.begin(), chamferDistances.end());
      if (chamferDistances.empty())
      {
        chamferDistances.push_back(std::numeric_limits<float>::max());
      }
      cout << "Best geometric hashing pose (px): " << chamferDistances[0] << endl;
      cout << "Number of initial poses: " << chamferDistances.size() << endl;
      allChamferDistances.push_back(chamferDistances[0]);
      geometricHashingPoseCount.push_back(chamferDistances.size());
    }

    if (poses_cam.size() == 0)
    {
      ++segmentationFailuresCount;
      continue;
    }

    if (!posesQualities.empty())
    {
      std::vector<float>::iterator bestDetection = std::min_element(posesQualities.begin(), posesQualities.end());
      int bestDetectionIndex = std::distance(posesQualities.begin(), bestDetection);
//      int detectedObjectIndex = detector.getTrainObjectIndex(detectedObjectsNames[bestDetectionIndex]);
//      indicesOfRecognizedObjects.push_back(detectedObjectIndex);
      cout << "Recognized object: " << detectedObjectsNames[bestDetectionIndex] << endl;
    }

    cout << "Recognition time: " << recognitionTime.getTimeSec() << "s" << endl;
    allRecognitionTimes.push_back(recognitionTime.getTimeSec());

    if (objectNames.size() == 1)
    {
      cout << "initial poses: " << debugInfo.initialPoses.size() << endl;
      vector<PoseError> initialPoseErrors;
      for (size_t i = 0 ; i < debugInfo.initialPoses.size(); ++i)
      {
        PoseError poseError;
        evaluatePoseWithRotation(edgeModels[0], debugInfo.initialPoses[i], model2test_ground, poseError);
        cout << poseError << endl;
        initialPoseErrors.push_back(poseError);
//        showEdgels(kinectBgrImage, edgeModels[0].points, debugInfo.initialPoses[i], kinectCamera, "gh pose");
//        waitKey();
      }
      cout << "the end." << endl;
      PoseError currentBestInitialError;
      {
        vector<PoseError>::iterator bestPoseIt = std::min_element(initialPoseErrors.begin(), initialPoseErrors.end());
        int bestPoseIdx = std::distance(initialPoseErrors.begin(), bestPoseIt);
        currentBestInitialError = initialPoseErrors[bestPoseIdx];
        cout << "Best initial error: " << currentBestInitialError << endl;
        bestInitialPoses.push_back(currentBestInitialError);
      }


      CV_Assert(poses_cam.size() == 1);
      int objectIndex = 0;
      initialPoseCount.push_back(poses_cam.size());

      vector<PoseError> currentPoseErrors(poses_cam.size());
      for (size_t i = 0 ; i < poses_cam.size(); ++i)
      {
        evaluatePoseWithRotation(edgeModels[objectIndex], poses_cam[i], model2test_ground, currentPoseErrors[i]);
        cout << currentPoseErrors[i] << endl;

#ifdef VISUALIZE_POSE_REFINEMENT
        namedWindow("pose is ready");
        waitKey();
        destroyWindow("pose is ready");
//        displayEdgels(glassMask, edgeModels[objectIndex].points, initPoses_cam[objectIndex][i], kinectCamera, "initial");
#ifdef USE_3D_VISUALIZATION
        publishPoints(edgeModels[objectIndex].points, viewer, Scalar(255, 0, 0), "final object", poses_cam[i]);
#endif
        showEdgels(kinectBgrImage, edgeModels[objectIndex].points, poses_cam[i], kinectCamera, "final");
        showEdgels(kinectBgrImage, edgeModels[objectIndex].stableEdgels, poses_cam[i], kinectCamera, "final surface");
        namedWindow("initial pose");

#ifdef USE_3D_VISUALIZATION
        while (!viewer->wasStopped ())
        {
          viewer->spinOnce (100);
          boost::this_thread::sleep (boost::posix_time::microseconds (100000));
        }
#endif
        waitKey();
        destroyWindow("initial pose");
#endif
      }
      vector<PoseError>::iterator bestPoseIt = std::min_element(currentPoseErrors.begin(), currentPoseErrors.end());
      int bestPoseIdx = std::distance(currentPoseErrors.begin(), bestPoseIt);
      PoseError currentBestError = currentPoseErrors[bestPoseIdx];
      cout << "Best pose: " << currentBestError << endl;
      bestPoses.push_back(currentBestError);

      cout << "Result: " << occlusionPercentage << ", " << debugInfo.initialPoses.size() << ", " <<
              currentBestInitialError.getTranslationDifference() << ", " << currentBestInitialError.getRotationDifference(false) << ", " <<
              currentBestError.getTranslationDifference() << ", " << currentBestError.getRotationDifference(false) << endl;

#ifdef WRITE_ERRORS
      const float maxTrans = 0.02;
      if (currentPoseErrors[bestPoseIdx].getTranslationDifference() > maxTrans)
      {
        Mat glassMask = debugInfo.glassMask;
        std::stringstream str;
        str << testImageIdx;
        Mat segmentation = drawSegmentation(kinectBgrImage, glassMask);
        imwrite(errorsVisualizationPath + "/" + objectNames[0] + "_" + str.str() + "_mask.png", segmentation);

        Mat poseImage = kinectBgrImage.clone();
        detector.visualize(poses_cam, detectedObjectsNames, poseImage);
        imwrite(errorsVisualizationPath + "/" + objectNames[0] + "_" + str.str() + "_pose.png", poseImage);

        const float depthNormalizationFactor = 100;
        imwrite(errorsVisualizationPath + "/" + objectNames[0] + "_" + str.str() + "_depth.png", kinectDepth * depthNormalizationFactor);
      }
#endif
    }
#endif
    cout << endl;

#ifdef PROFILE
    return 0;
#endif
  }


  cout << "Segmentation failures: " << static_cast<float>(segmentationFailuresCount) / testIndices.size() << endl;
  cout << "Bad segmentation rate: " << static_cast<float>(badSegmentationCount) / testIndices.size() << endl;

/*
  cout << "Recognition statistics:" << endl;
  for (size_t i = 0; i < objectNames.size(); ++i)
  {
    cout << countNonZero(Mat(indicesOfRecognizedObjects) == i) << " ";
  }
  cout << endl;

  for (size_t i = 0; i < objectNames.size(); ++i)
  {
    cout << countNonZero(Mat(indicesOfRecognizedObjects) == i) / static_cast<float>(indicesOfRecognizedObjects.size())  << " ";
  }
  cout << endl;
*/

  if (objectNames.size() == 1)
  {

    float meanInitialPoseCount = std::accumulate(initialPoseCount.begin(), initialPoseCount.end(), 0.0f);
    meanInitialPoseCount /= initialPoseCount.size();
    cout << "mean initial pose count: " << meanInitialPoseCount << endl;

    //TODO: move up
    const double cmThreshold = 2.0;

//    const double cmThreshold = 5.0;
    PoseError::evaluateErrors(bestPoses, cmThreshold);

    cout << "initial poses:" << endl;
    //TODO: move up
    PoseError::evaluateErrors(bestInitialPoses, 3.0 * cmThreshold);
  }

  cout << "Evaluation of geometric hashing" << endl;
  std::sort(allChamferDistances.begin(), allChamferDistances.end());
  const float successfulChamferDistance = 5.0f;
  int ghSuccessCount = 0;
  double meanChamferDistance = 0.0;
  for (size_t i = 0; i < allChamferDistances.size(); ++i)
  {
    cout << i << "\t: " << allChamferDistances[i] << endl;
    if (allChamferDistances[i] < successfulChamferDistance)
    {
      ++ghSuccessCount;
      meanChamferDistance += allChamferDistances[i];
    }
  }
  if (ghSuccessCount != 0)
  {
    meanChamferDistance /= ghSuccessCount;
  }
  int posesSum = std::accumulate(geometricHashingPoseCount.begin(), geometricHashingPoseCount.end(), 0);
  float meanGeometricHashingPoseCount = static_cast<float>(posesSum) / geometricHashingPoseCount.size();
  cout << "Mean number of initial poses: " << meanGeometricHashingPoseCount << endl;

  float ghSuccessRate = static_cast<float>(ghSuccessCount) / allChamferDistances.size();
  cout << "Success rate: " << ghSuccessRate << endl;
  cout << "Mean chamfer distance (px): " << meanChamferDistance << endl;

  double timesSum = std::accumulate(allRecognitionTimes.begin(), allRecognitionTimes.end(), 0.0);
  double meanRecognitionTime = timesSum / allRecognitionTimes.size();
  cout << "Mean recognition time (s): " << meanRecognitionTime << endl;

  std::system("date");
  return 0;
}
//TODO: undistort and scale the image
void Silhouette::affine2poseRT(const EdgeModel &edgeModel, const PinholeCamera &camera, const cv::Mat &affineTransformation, bool useClosedFormPnP, PoseRT &pose_cam) const
{
    PoseRT poseWithExtrinsics_cam;
    if (useClosedFormPnP)
    {
        CV_Assert(camera.cameraMatrix.type() == CV_64FC1);
        const float eps = 1e-6;
//    CV_Assert(fabs(camera.cameraMatrix.at<double>(0, 0) - camera.cameraMatrix.at<double>(1, 1)) < eps);
        CV_Assert(norm(camera.distCoeffs) < eps);

        Mat homography = affine2homography(affineTransformation);
        if (homography.type() != CV_64FC1)
        {
            Mat homographyDouble;
            homography.convertTo(homographyDouble, CV_64FC1);
            homography = homographyDouble;
        }

        //TODO: which inversion method is better?
        Mat fullTransform = camera.cameraMatrix.inv() * homography * camera.cameraMatrix;
        const int dim = 3;
        CV_Assert(fullTransform.rows == dim && fullTransform.cols == dim);
        CV_Assert(fullTransform.type() == CV_64FC1);
        Mat rotationalComponentWithScale = fullTransform(Range(0, 2), Range(0, 2));
        double det = determinant(rotationalComponentWithScale);
        CV_Assert(det > eps);
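        // The upper-left 2x2 block is a 2D rotation scaled by s, so det = s^2;
        // 1/sqrt(det) recovers the depth ratio applied to the translation below.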
        double scale = 1.0 / sqrt(det);

        Point3d objectCenter = edgeModel.getObjectCenter();
        Point3d initialObjectCenter;
        transformPoint(initialPose_cam.getProjectiveMatrix(), objectCenter, initialObjectCenter);
        double meanZ = initialObjectCenter.z;
        CV_Assert(meanZ > eps);

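        // Convert the in-plane shift and scale change into a 3D translation at
        // the object's mean depth.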
        Point3d tvec;
        tvec.z = (scale - 1.0) * meanZ;
        tvec.x = fullTransform.at<double>(0, 2) * (scale * meanZ);
        tvec.y = fullTransform.at<double>(1, 2) * (scale * meanZ);

        Mat R = Mat::eye(dim, dim, fullTransform.type());
        Mat rotation2d = R(Range(0, 2), Range(0, 2));
        Mat pureRotationalComponent = rotationalComponentWithScale * scale;
        pureRotationalComponent.copyTo(rotation2d);

        Mat tvecMat;
        point2col(tvec, tvecMat);
        PoseRT pose2d_cam(R, tvecMat);

        poseWithExtrinsics_cam = pose2d_cam * initialPose_cam;
        pose_cam = camera.extrinsics.inv() * poseWithExtrinsics_cam;
    }
    else
    {
        vector<Point2f> projectedObjectPoints;
        camera.projectPoints(edgeModel.points, initialPose_cam, projectedObjectPoints);

        Mat transformedObjectPoints;
        transform(Mat(projectedObjectPoints), transformedObjectPoints, affineTransformation);

        solvePnP(Mat(edgeModel.points), transformedObjectPoints, camera.cameraMatrix, camera.distCoeffs, poseWithExtrinsics_cam.rvec, poseWithExtrinsics_cam.tvec, useClosedFormPnP);
        pose_cam = camera.extrinsics.inv() * poseWithExtrinsics_cam;
    }
}
int main(int argc, char *argv[])
{
	CmdLine cmd;
	std::string img_name;
	cmd.add(make_option('i', img_name, "imgname"));
	try {
		if (argc == 1) throw std::string("Invalid command line parameter.");
		cmd.process(argc, argv);
	}
	catch (const std::string& s) {
		std::cerr << "Feature detector \nUsage: " << argv[0] << "\n"
			<< "[-i|--imgname name]\n"
			<< std::endl;

		std::cerr << s << std::endl;
		return EXIT_FAILURE;
	}
	cv::Mat img(cv::imread(img_name, 0));
	assert(img.type() == CV_8UC1 && !img.empty());
	

	PinholeCamera *cam = new PinholeCamera(640.0, 480.0,
		517.306408, 516.469215, 318.643040, 255.313989,
		0.262383, -0.953104, -0.005358, 0.002628, 1.163314);
	ORBextractor *extractor = new ORBextractor();
	Timer timer;
	Frame frame(cam, img, 0, extractor);
	double time = timer.Stop();
	std::cout << "Original time " << time<< std::endl;
	std::cout << "frame keypoint num " << frame.keypoints_num_ << std::endl;
	cv::Mat img_rgb(img.size(), CV_8UC3);
	cv::cvtColor(img, img_rgb, CV_GRAY2RGB);
	std::vector<Feature> cv_feats = frame.features_;
	std::for_each(cv_feats.begin(), cv_feats.end(), [&](Feature i){
		cv::circle(img_rgb, i.keypoint_.pt, 4 * (i.keypoint_.octave + 1), cv::Scalar(0, 255, 0), 1);
		cv::circle(img_rgb, i.undistored_keypoint_.pt, 4 * (i.undistored_keypoint_.octave + 1), cv::Scalar(0, 0, 255), 1);
	});
	cv::imshow("Original", img_rgb);

	// Test: undistort the image first, then run feature extraction
	cv::Mat un_img;
	std::vector<cv::KeyPoint> cv_keypoints;
	cv::Mat                   cv_descs;
	timer.Start();
	cam->undistortImage(img, un_img);
	(*extractor)(un_img, cv::Mat(), cv_keypoints, cv_descs);
	time = timer.Stop();
	std::cout << "Undistort frame time " << time << std::endl;
	std::cout << "Undistort frame keypoint num " << cv_keypoints.size() << std::endl;
	cv::Mat img_opencv(img.size(), CV_8UC3);
	cv::cvtColor(un_img, img_opencv, CV_GRAY2RGB);
	std::for_each(cv_keypoints.begin(), cv_keypoints.end(), [&](cv::KeyPoint i){
		cv::circle(img_opencv, i.pt, 4 * (i.octave + 1), cv::Scalar(0, 255, 0), 1);
	});
	cv::imshow("Undistort", img_opencv);
	cv::waitKey(0);

	delete cam;
	delete extractor;
	getchar();
	return 0;
}
void TODBaseImporter::importCamera(const std::string &filename, PinholeCamera &camera)
{
  camera.read(filename);
}
namespace example1 {
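// Shared fixture: a two-view BAL dataset; the ground-truth essential matrix is
// E = [t]x * R between the two cameras (verified in the testData check below).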

const string filename = findExampleDataFile("5pointExample1.txt");
SfM_data data;
bool readOK = readBAL(filename, data);
Rot3 c1Rc2 = data.cameras[1].pose().rotation();
Point3 c1Tc2 = data.cameras[1].pose().translation();
PinholeCamera<Cal3_S2> camera2(data.cameras[1].pose(), Cal3_S2());
EssentialMatrix trueE(c1Rc2, Unit3(c1Tc2));
double baseline = 0.1; // actual baseline of the camera

Point2 pA(size_t i) {
    return data.tracks[i].measurements[0].second;
}
Point2 pB(size_t i) {
    return data.tracks[i].measurements[1].second;
}
Vector vA(size_t i) {
    return EssentialMatrix::Homogeneous(pA(i));
}
Vector vB(size_t i) {
    return EssentialMatrix::Homogeneous(pB(i));
}

//*************************************************************************
TEST (EssentialMatrixFactor, testData) {
    CHECK(readOK);

    // Check E matrix
    Matrix expected(3, 3);
    expected << 0, 0, 0, 0, 0, -0.1, 0.1, 0, 0;
    Matrix aEb_matrix = skewSymmetric(c1Tc2.x(), c1Tc2.y(), c1Tc2.z())
                        * c1Rc2.matrix();
    EXPECT(assert_equal(expected, aEb_matrix, 1e-8));

    // Check some projections
    EXPECT(assert_equal(Point2(0, 0), pA(0), 1e-8));
    EXPECT(assert_equal(Point2(0, 0.1), pB(0), 1e-8));
    EXPECT(assert_equal(Point2(0, -1), pA(4), 1e-8));
    EXPECT(assert_equal(Point2(-1, 0.2), pB(4), 1e-8));

    // Check homogeneous version
    EXPECT(assert_equal(Vector3(-1, 0.2, 1), vB(4), 1e-8));

    // Check epipolar constraint
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(0, vA(i).transpose() * aEb_matrix * vB(i), 1e-8);

    // Check epipolar constraint
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(0, trueE.error(vA(i), vB(i)), 1e-7);
}

//*************************************************************************
TEST (EssentialMatrixFactor, factor) {
    for (size_t i = 0; i < 5; i++) {
        EssentialMatrixFactor factor(1, pA(i), pB(i), model1);

        // Check evaluation
        Vector expected(1);
        expected << 0;
        Matrix Hactual;
        Vector actual = factor.evaluateError(trueE, Hactual);
        EXPECT(assert_equal(expected, actual, 1e-7));

        // Use numerical derivatives to calculate the expected Jacobian
        Matrix Hexpected;
        typedef Eigen::Matrix<double,1,1> Vector1;
        Hexpected = numericalDerivative11<Vector1, EssentialMatrix>(
                        boost::bind(&EssentialMatrixFactor::evaluateError, &factor, _1,
                                    boost::none), trueE);

        // Verify the Jacobian is correct
        EXPECT(assert_equal(Hexpected, Hactual, 1e-8));
    }
}

//*************************************************************************
TEST (EssentialMatrixFactor, minimization) {
    // Here we want to optimize directly on essential matrix constraints
    // Yi Ma's algorithm (Ma01ijcv) is a bit cumbersome to implement,
    // but GTSAM does the equivalent anyway, provided we give the right
    // factors. In this case, the factors are the constraints.

    // We start with a factor graph and add constraints to it
    // Noise sigma is 1cm, assuming metric measurements
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < 5; i++)
        graph.add(EssentialMatrixFactor(1, pA(i), pB(i), model1));

    // Check error at ground truth
    Values truth;
    truth.insert(1, trueE);
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Check error at initial estimate
    Values initial;
    EssentialMatrix initialE = trueE.retract(
                                   (Vector(5) << 0.1, -0.1, 0.1, 0.1, -0.1).finished());
    initial.insert(1, initialE);
#if defined(GTSAM_ROT3_EXPMAP) || defined(GTSAM_USE_QUATERNIONS)
    EXPECT_DOUBLES_EQUAL(643.26, graph.error(initial), 1e-2);
#else
    EXPECT_DOUBLES_EQUAL(639.84, graph.error(initial), 1e-2);
#endif

    // Optimize
    LevenbergMarquardtParams parameters;
    LevenbergMarquardtOptimizer optimizer(graph, initial, parameters);
    Values result = optimizer.optimize();

    // Check result
    EssentialMatrix actual = result.at<EssentialMatrix>(1);
    EXPECT(assert_equal(trueE, actual, 1e-1));

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);

    // Check errors individually
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(0, actual.error(vA(i), vB(i)), 1e-6);

}

//*************************************************************************
TEST (EssentialMatrixFactor2, factor) {
    for (size_t i = 0; i < 5; i++) {
        EssentialMatrixFactor2 factor(100, i, pA(i), pB(i), model2);

        // Check evaluation
        Point3 P1 = data.tracks[i].p, P2 = data.cameras[1].pose().transform_to(P1);
        const Point2 pi = PinholeBase::Project(P2);
        Point2 reprojectionError(pi - pB(i));
        Vector expected = reprojectionError.vector();

        Matrix Hactual1, Hactual2;
        double d(baseline / P1.z());
        Vector actual = factor.evaluateError(trueE, d, Hactual1, Hactual2);
        EXPECT(assert_equal(expected, actual, 1e-7));

        // Use numerical derivatives to calculate the expected Jacobian
        Matrix Hexpected1, Hexpected2;
        boost::function<Vector(const EssentialMatrix&, double)> f = boost::bind(
                    &EssentialMatrixFactor2::evaluateError, &factor, _1, _2, boost::none,
                    boost::none);
        Hexpected1 = numericalDerivative21<Vector2, EssentialMatrix, double>(f, trueE, d);
        Hexpected2 = numericalDerivative22<Vector2, EssentialMatrix, double>(f, trueE, d);

        // Verify the Jacobian is correct
        EXPECT(assert_equal(Hexpected1, Hactual1, 1e-8));
        EXPECT(assert_equal(Hexpected2, Hactual2, 1e-8));
    }
}

//*************************************************************************
TEST (EssentialMatrixFactor2, minimization) {
    // Here we want to optimize for E and inverse depths at the same time

    // We start with a factor graph and add constraints to it
    // Noise sigma is 1cm, assuming metric measurements
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < 5; i++)
        graph.add(EssentialMatrixFactor2(100, i, pA(i), pB(i), model2));

    // Check error at ground truth
    Values truth;
    truth.insert(100, trueE);
    for (size_t i = 0; i < 5; i++) {
        Point3 P1 = data.tracks[i].p;
        truth.insert(i, double(baseline / P1.z()));
    }
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Optimize
    LevenbergMarquardtParams parameters;
    // parameters.setVerbosity("ERROR");
    LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
    Values result = optimizer.optimize();

    // Check result
    EssentialMatrix actual = result.at<EssentialMatrix>(100);
    EXPECT(assert_equal(trueE, actual, 1e-1));
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}

//*************************************************************************
// Below we want to optimize for an essential matrix specified in a
// body coordinate frame B which is rotated with respect to the camera
// frame C, via the rotation bRc.

// The "true E" in the body frame is then
EssentialMatrix bodyE = cRb.inverse() * trueE;

//*************************************************************************
TEST (EssentialMatrixFactor3, factor) {

    for (size_t i = 0; i < 5; i++) {
        EssentialMatrixFactor3 factor(100, i, pA(i), pB(i), cRb, model2);

        // Check evaluation
        Point3 P1 = data.tracks[i].p;
        const Point2 pi = camera2.project(P1);
        Point2 reprojectionError(pi - pB(i));
        Vector expected = reprojectionError.vector();

        Matrix Hactual1, Hactual2;
        double d(baseline / P1.z());
        Vector actual = factor.evaluateError(bodyE, d, Hactual1, Hactual2);
        EXPECT(assert_equal(expected, actual, 1e-7));

        // Use numerical derivatives to calculate the expected Jacobian
        Matrix Hexpected1, Hexpected2;
        boost::function<Vector(const EssentialMatrix&, double)> f = boost::bind(
                    &EssentialMatrixFactor3::evaluateError, &factor, _1, _2, boost::none,
                    boost::none);
        Hexpected1 = numericalDerivative21<Vector2, EssentialMatrix, double>(f, bodyE, d);
        Hexpected2 = numericalDerivative22<Vector2, EssentialMatrix, double>(f, bodyE, d);

        // Verify the Jacobian is correct
        EXPECT(assert_equal(Hexpected1, Hactual1, 1e-8));
        EXPECT(assert_equal(Hexpected2, Hactual2, 1e-8));
    }
}

//*************************************************************************
TEST (EssentialMatrixFactor3, minimization) {

    // As before, we start with a factor graph and add constraints to it
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < 5; i++)
        // but now we specify the rotation bRc
        graph.add(EssentialMatrixFactor3(100, i, pA(i), pB(i), cRb, model2));

    // Check error at ground truth
    Values truth;
    truth.insert(100, bodyE);
    for (size_t i = 0; i < 5; i++) {
        Point3 P1 = data.tracks[i].p;
        truth.insert(i, double(baseline / P1.z()));
    }
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Optimize
    LevenbergMarquardtParams parameters;
    // parameters.setVerbosity("ERROR");
    LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
    Values result = optimizer.optimize();

    // Check result
    EssentialMatrix actual = result.at<EssentialMatrix>(100);
    EXPECT(assert_equal(bodyE, actual, 1e-1));
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}

} // namespace example1
namespace example2 {
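// Second fixture: pixel measurements with a Cal3Bundler calibration; points are
// calibrated in vA/vB below before forming homogeneous epipolar vectors.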

const string filename = findExampleDataFile("5pointExample2.txt");
SfM_data data;
bool readOK = readBAL(filename, data);
Rot3 aRb = data.cameras[1].pose().rotation();
Point3 aTb = data.cameras[1].pose().translation();
EssentialMatrix trueE(aRb, Unit3(aTb));

double baseline = 10; // actual baseline of the camera

Point2 pA(size_t i) {
    return data.tracks[i].measurements[0].second;
}
Point2 pB(size_t i) {
    return data.tracks[i].measurements[1].second;
}

boost::shared_ptr<Cal3Bundler> //
K = boost::make_shared<Cal3Bundler>(500, 0, 0);
PinholeCamera<Cal3Bundler> camera2(data.cameras[1].pose(), *K);

Vector vA(size_t i) {
    Point2 xy = K->calibrate(pA(i));
    return EssentialMatrix::Homogeneous(xy);
}
Vector vB(size_t i) {
    Point2 xy = K->calibrate(pB(i));
    return EssentialMatrix::Homogeneous(xy);
}

//*************************************************************************
TEST (EssentialMatrixFactor, extraMinimization) {
    // Additional test with camera moving in positive X direction

    NonlinearFactorGraph graph;
    for (size_t i = 0; i < 5; i++)
        graph.add(EssentialMatrixFactor(1, pA(i), pB(i), model1, K));

    // Check error at ground truth
    Values truth;
    truth.insert(1, trueE);
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Check error at initial estimate
    Values initial;
    EssentialMatrix initialE = trueE.retract(
                                   (Vector(5) << 0.1, -0.1, 0.1, 0.1, -0.1).finished());
    initial.insert(1, initialE);

#if defined(GTSAM_ROT3_EXPMAP) || defined(GTSAM_USE_QUATERNIONS)
    EXPECT_DOUBLES_EQUAL(643.26, graph.error(initial), 1e-2);
#else
    EXPECT_DOUBLES_EQUAL(639.84, graph.error(initial), 1e-2);
#endif

    // Optimize
    LevenbergMarquardtParams parameters;
    LevenbergMarquardtOptimizer optimizer(graph, initial, parameters);
    Values result = optimizer.optimize();

    // Check result
    EssentialMatrix actual = result.at<EssentialMatrix>(1);
    EXPECT(assert_equal(trueE, actual, 1e-1));

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);

    // Check errors individually
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(0, actual.error(vA(i), vB(i)), 1e-6);

}

//*************************************************************************
TEST (EssentialMatrixFactor2, extraTest) {
    for (size_t i = 0; i < 5; i++) {
        EssentialMatrixFactor2 factor(100, i, pA(i), pB(i), model2, K);

        // Check evaluation
        Point3 P1 = data.tracks[i].p;
        const Point2 pi = camera2.project(P1);
        Point2 reprojectionError(pi - pB(i));
        Vector expected = reprojectionError.vector();

        Matrix Hactual1, Hactual2;
        double d(baseline / P1.z());
        Vector actual = factor.evaluateError(trueE, d, Hactual1, Hactual2);
        EXPECT(assert_equal(expected, actual, 1e-7));

        // Use numerical derivatives to calculate the expected Jacobian
        Matrix Hexpected1, Hexpected2;
        boost::function<Vector(const EssentialMatrix&, double)> f = boost::bind(
                    &EssentialMatrixFactor2::evaluateError, &factor, _1, _2, boost::none,
                    boost::none);
        Hexpected1 = numericalDerivative21<Vector2, EssentialMatrix, double>(f, trueE, d);
        Hexpected2 = numericalDerivative22<Vector2, EssentialMatrix, double>(f, trueE, d);

        // Verify the Jacobian is correct
        EXPECT(assert_equal(Hexpected1, Hactual1, 1e-6));
        EXPECT(assert_equal(Hexpected2, Hactual2, 1e-8));
    }
}

//*************************************************************************
TEST (EssentialMatrixFactor2, extraMinimization) {
    // Additional test with camera moving in positive X direction

    // We start with a factor graph and add constraints to it
    // Noise sigma is 1, assuming pixel measurements
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < data.number_tracks(); i++)
        graph.add(EssentialMatrixFactor2(100, i, pA(i), pB(i), model2, K));

    // Check error at ground truth
    Values truth;
    truth.insert(100, trueE);
    for (size_t i = 0; i < data.number_tracks(); i++) {
        Point3 P1 = data.tracks[i].p;
        truth.insert(i, double(baseline / P1.z()));
    }
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Optimize
    LevenbergMarquardtParams parameters;
    // parameters.setVerbosity("ERROR");
    LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
    Values result = optimizer.optimize();

    // Check result
    EssentialMatrix actual = result.at<EssentialMatrix>(100);
    EXPECT(assert_equal(trueE, actual, 1e-1));
    for (size_t i = 0; i < data.number_tracks(); i++)
        EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}

//*************************************************************************
TEST (EssentialMatrixFactor3, extraTest) {

    // The "true E" in the body frame is
    EssentialMatrix bodyE = cRb.inverse() * trueE;

    for (size_t i = 0; i < 5; i++) {
        EssentialMatrixFactor3 factor(100, i, pA(i), pB(i), cRb, model2, K);

        // Check evaluation
        Point3 P1 = data.tracks[i].p;
        const Point2 pi = camera2.project(P1);
        Point2 reprojectionError(pi - pB(i));
        Vector expected = reprojectionError.vector();

        Matrix Hactual1, Hactual2;
        double d(baseline / P1.z());
        Vector actual = factor.evaluateError(bodyE, d, Hactual1, Hactual2);
        EXPECT(assert_equal(expected, actual, 1e-7));

        // Use numerical derivatives to calculate the expected Jacobian
        Matrix Hexpected1, Hexpected2;
        boost::function<Vector(const EssentialMatrix&, double)> f = boost::bind(
                    &EssentialMatrixFactor3::evaluateError, &factor, _1, _2, boost::none,
                    boost::none);
        Hexpected1 = numericalDerivative21<Vector2, EssentialMatrix, double>(f, bodyE, d);
        Hexpected2 = numericalDerivative22<Vector2, EssentialMatrix, double>(f, bodyE, d);

        // Verify the Jacobian is correct
        EXPECT(assert_equal(Hexpected1, Hactual1, 1e-6));
        EXPECT(assert_equal(Hexpected2, Hactual2, 1e-8));
    }
}

} // namespace example2