// estimate the camera pose (R, T) from 2D-3D point correspondences via PnP with RANSAC
bool motionEstimationPnP (const std::vector<cv::Point2f>& imgPoints,
                          const std::vector<cv::Point3f>& pointCloud_1LR,
                          const cv::Mat& K,
                          cv::Mat& T, cv::Mat& R)
{
    if(pointCloud_1LR.size() <= 7 || imgPoints.size() <= 7 || pointCloud_1LR.size() != imgPoints.size()) {
        //something went wrong aligning 3D to 2D points..
        cerr << "NO MOVEMENT: couldn't find [enough] corresponding cloud points... (only " << pointCloud_1LR.size() << ")" <<endl;
        return false;
    }

    // convert the previous rotation estimate to Rodrigues form so it can serve
    // as the extrinsic guess passed to solvePnPRansac below
    cv::Mat rvec;
    cv::Rodrigues(R, rvec);

    std::vector<int> inliers;

    // largest image coordinate, used below to scale the RANSAC reprojection threshold
    double minVal, maxVal;
    cv::minMaxIdx(imgPoints, &minVal, &maxVal);

    std::vector<float> distCoeffVec; // empty vector: the images are already undistorted

    /*
     * solvePnPRansac(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs,
     *                OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int iterationsCount=100,
     *                float reprojectionError=8.0, int minInliersCount=100, OutputArray inliers=noArray(), int flags=ITERATIVE )
     */
    //cv::solvePnPRansac(pointCloud_1LR, imgPoints, K, distCoeffVec, rvec, T);
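    // note: the extrinsic guess (rvec, T) is only honoured by the iterative
    // solver; CV_EPNP ignores it and simply overwrites rvec and T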
    cv::solvePnPRansac(pointCloud_1LR, imgPoints, K, distCoeffVec, rvec, T, true, 1000, 0.006 * maxVal, 0.25 * (float)(imgPoints.size()), inliers, CV_EPNP);
    rvec.convertTo(rvec, CV_32F);
    T.convertTo(T, CV_32F);

    // calculate reprojection error and define inliers
    std::vector<cv::Point2f> projected3D;
    cv::projectPoints(pointCloud_1LR, rvec, T, K, distCoeffVec, projected3D);
    if(inliers.size() == 0) {
        // RANSAC reported no inliers: fall back to a fixed 10-pixel reprojection threshold
        for(unsigned int i = 0; i < projected3D.size(); i++) {
            //std::cout << "repro error " << norm(projected3D[i]-imgPoints[i])  << std::endl;
            if(norm(projected3D[i]-imgPoints[i]) < 10.0)
                inliers.push_back(i);
        }
    }


    // require at least 20% of the correspondences to be inliers
    if(inliers.size() < (float)(imgPoints.size())/5.0) {
        std::cout << "NO MOVEMENT: not enough inliers to consider a good pose ("<<inliers.size()<<"/"<<imgPoints.size()<<")" << std::endl;
        return false;
    }

    cv::Rodrigues(rvec, R);
    R.convertTo(R, CV_32F);
    if(!CheckCoherentRotation(R)) {
        std::cout <<  "NO MOVEMENT: rotation is incoherent..." << std::endl;
        return false;
    }

    return true;
}
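/* ------------------------------------------------------------------------- */
// CheckCoherentRotation is called above but not defined in this file. Below is
// a minimal sketch of what it is assumed to do, following the usual
// structure-from-motion sanity check (the project's real implementation may
// differ): a valid rotation matrix must have |det(R)| == 1 up to noise.
bool CheckCoherentRotation(const cv::Mat& R)
{
    // anything other than |det(R)| == 1 means R is not orthonormal
    if (fabs(fabs(cv::determinant(R)) - 1.0) > 1e-07) {
        std::cerr << "det(R) != +-1.0, this is not a rotation matrix" << std::endl;
        return false;
    }
    return true;
}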
/* ------------------------------------------------------------------------- */
bool MainWindow::FindPoseEstimation(
        cv::Mat_<double>& rvec,
        cv::Mat_<double>& t,
        cv::Mat_<double>& R,
        std::vector<cv::Point3f> ppcloud,
        std::vector<cv::Point2f> imgPoints
        )
{
    if(ppcloud.size() <= 7 || imgPoints.size() <= 7 || ppcloud.size() != imgPoints.size()) {
        //something went wrong aligning 3D to 2D points..
        cerr << "couldn't find [enough] corresponding cloud points... (only " << ppcloud.size() << ")" <<endl;
        return false;
    }

    vector<int> inliers;

    //use the CPU solver; the largest image coordinate scales the RANSAC reprojection threshold
    double minVal, maxVal;
    cv::minMaxIdx(imgPoints, &minVal, &maxVal);
    cv::solvePnPRansac(ppcloud, imgPoints, K, distcoeff, rvec, t,
                       true, 1000, 0.006 * maxVal, 0.25 * (double)(imgPoints.size()), inliers, CV_EPNP);

    // reproject the cloud to measure the per-point reprojection error
    vector<cv::Point2f> projected3D;
    cv::projectPoints(ppcloud, rvec, t, K, distcoeff, projected3D);

    if(inliers.size() == 0) {
        // RANSAC reported no inliers: fall back to a fixed 5-pixel reprojection threshold
        for(unsigned int i=0;i<projected3D.size();i++) {
            if(norm(projected3D[i]-imgPoints[i]) < 5.0)
                inliers.push_back(i);
        }
    }

    //cv::Rodrigues(rvec, R);
    //visualizerShowCamera(R,t,0,255,0,0.1);

    // require at least 20% of the correspondences to be inliers
    if(inliers.size() < (double)(imgPoints.size())/5.0) {
        cerr << "not enough inliers to consider a good pose ("<<inliers.size()<<"/"<<imgPoints.size()<<")"<< endl;
        return false;
    }

    if(cv::norm(t) > 200.0) {
        // an implausibly large translation usually means the estimate diverged
        cerr << "estimated camera movement is too big, skip this camera" << endl;
        return false;
    }

    cv::Rodrigues(rvec, R);
    if(!CheckCoherentRotation(R)) {
        cerr << "rotation is incoherent. we should try a different base view..." << endl;
        return false;
    }

    std::cout << "found t = " << t << "\nR = \n"<<R<<std::endl;
    return true;
}
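/* ------------------------------------------------------------------------- */
// calcCameraPoseFromE() below decomposes an essential matrix E that is assumed
// to be a class member computed elsewhere. A minimal, hypothetical sketch of
// how E can be obtained from point matches and a shared intrinsic matrix K
// (HZ eq. 9.12: E = K^T F K); the real project may compute it differently:
cv::Mat essentialFromMatches(const std::vector<cv::Point2f>& pts1,
                             const std::vector<cv::Point2f>& pts2,
                             const cv::Mat& K)
{
    // robustly fit the fundamental matrix, then upgrade it with the intrinsics
    cv::Mat F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC, 3.0, 0.99);
    return K.t() * F * K;
}
/* ------------------------------------------------------------------------- */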
void EstimateCameraPose::calcCameraPoseFromE()
{
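	// SVD-based decomposition of the essential matrix (HZ sec. 9.6.2):
	// E = U diag(1,1,0) V^T gives R = U W V^T or U W^T V^T and t = +-u3,
	// hence the four candidate poses assembled below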
	cv::SVD svd(E, CV_SVD_MODIFY_A);
	cv::Mat svd_u = svd.u;
	cv::Mat svd_vt = svd.vt;
	cv::Mat svd_w = svd.w; // singular values (not used below)
	cv::Matx33d W(0, -1, 0, 1, 0, 0, 0, 0, 1);
	cv::Matx33d W_t;
	cv::transpose(W, W_t);
	cv::Mat_<double> R = svd_u * cv::Mat(W) * svd_vt; //HZ 9.19
	cv::Mat_<double> R2 = svd_u * cv::Mat(W_t) * svd_vt;
	cv::Mat_<double> t = svd_u.col(2); //u3
	cv::Mat_<double> t2 = -t;
	if (!CheckCoherentRotation(R) || !CheckCoherentRotation(R2)) {
		// bail out if either candidate rotation fails the sanity check
		std::cout << "resulting rotation is not coherent\n";
		return;
	}
	//the first camera sits at the origin: P1 = K [I | 0]
	cv::Matx34d origin = cv::Matx34d(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0);
	M1 = intrinsic*origin;
	//four possible camera matrices
	M2.resize(4);
	M2[0] = cv::Matx34d(R(0, 0), R(0, 1), R(0, 2), t(0),
		R(1, 0), R(1, 1), R(1, 2), t(1),
		R(2, 0), R(2, 1), R(2, 2), t(2));
	M2[1] = cv::Matx34d(R(0, 0), R(0, 1), R(0, 2), t2(0),
		R(1, 0), R(1, 1), R(1, 2), t2(1),
		R(2, 0), R(2, 1), R(2, 2), t2(2));
	M2[2] = cv::Matx34d(R2(0, 0), R2(0, 1), R2(0, 2), t2(0),
		R2(1, 0), R2(1, 1), R2(1, 2), t2(1),
		R2(2, 0), R2(2, 1), R2(2, 2), t2(2));
	M2[3] = cv::Matx34d(R2(0, 0), R2(0, 1), R2(0, 2), t(0),
		R2(1, 0), R2(1, 1), R2(1, 2), t(1),
		R2(2, 0), R2(2, 1), R2(2, 2), t(2));
	for (int i = 0; i < 4; i++)
	{
		M2[i] = intrinsic*M2[i];
	}
	//homogeneous 3D points
	cv::Mat points4d;
	//per-candidate count of triangulated points that fall behind the camera (z < 0)
	std::vector<int> falsezero = {0, 0, 0, 0};
	points3d.resize(4);
	for (size_t i = 0; i < M2.size(); i++)
	{
		triangulatePoints(M1, M2[i], matchedpoints1,
			matchedpoints2, points4d);
		for (int j = 0; j < points4d.cols; j++)
		{
			//convert homogeneous 3D points to 3D points
			float w = points4d.at<float>(3, j);
			float x = points4d.at<float>(0, j) / w;
			float y = points4d.at<float>(1, j) / w;
			float z = points4d.at<float>(2, j) / w;
			cv::Point3d p(x, y, z);
			if (z < 0) // depth is tested in the first camera only; a full cheirality check would also test the second
			{
				falsezero[i]++;
			}
			points3d[i].push_back(p);
		}
	}
	//pick the candidate pose with the fewest points behind the camera
	int i = 0;
	int small = falsezero[0];
	for (int j = 1; j < 4; j++)
	{
		if (falsezero[j] < small)
		{
			small = falsezero[j];
			i = j;
		}
	}
	truepoints3d = points3d[i];
	switch (i)
	{
	case 0:
	{
		rotation = R;
		translation = t;
		break;
	}
	case 1:
	{
		rotation = R;
		translation = t2;
		break;
	}
	case 2:
	{
		rotation = R2;
		translation = t2;
		break;
	}
	case 3:
	{
		rotation = R2;
		translation = t;
		break;
	}
	default:
		break;
	}
}
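/* ------------------------------------------------------------------------- */
// On OpenCV 3.x and later, the disambiguation above (decompose E, triangulate,
// keep the pose with the fewest points behind the camera) is built into
// cv::recoverPose. A minimal sketch for cross-checking the result, assuming a
// shared camera matrix K; this helper is not part of the original class:
void recoverPoseSketch(const cv::Mat& E,
                       const std::vector<cv::Point2f>& pts1,
                       const std::vector<cv::Point2f>& pts2,
                       const cv::Mat& K)
{
    cv::Mat R, t;
    // returns the number of point pairs that pass the cheirality check
    int good = cv::recoverPose(E, pts1, pts2, K, R, t);
    std::cout << "recoverPose: " << good << " points in front of both cameras\n"
              << "R = \n" << R << "\nt = " << t << std::endl;
}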