/**
 * @author      JIA Pei
 * @version     2010-02-05
 * @brief       Calculate statistics for all profiles; compute every landmark's mean profile and covariance matrix
 * @return      void
*/
void VO_ASMNDProfiles::VO_CalcStatistics4AllProfiles()
{
    // Calculate the inverse of Sg for every pyramid level, landmark point and profile dimension
    for(unsigned int i = 0; i < this->m_iNbOfPyramidLevels; i++)
    {
        Mat_<float> allProfiles = Mat_<float>::zeros( this->m_iNbOfSamples, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
        Mat_<float> Covar = Mat_<float>::zeros(this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength(), 
                                                this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
        Mat_<float> Mean = Mat_<float>::zeros(1, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
        for(unsigned int j = 0; j < this->m_iNbOfPoints; j++)
        {
            for(unsigned int k = 0; k < this->m_iNbOfProfileDim; k++)
            {
                for(unsigned int l = 0; l < this->m_iNbOfSamples; l++)
                {
                    Mat_<float> tmpRow = allProfiles.row(l);
                    Mat_<float> tmp = this->m_vvvNormalizedProfiles[l][i][j].GetTheProfile().col(k).t();
                    tmp.copyTo(tmpRow);
                }

                // Now calculate the covariance matrix of the profiles for landmark j along dimension k
                cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_NORMAL+CV_COVAR_ROWS+CV_COVAR_SCALE, CV_32F);
//                cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_SCRAMBLED+CV_COVAR_ROWS, CV_32F);
                this->m_vvMeanNormalizedProfile[i][j].Set1DimProfile(Mean.t(), k);

                // Explained by YAO Wei, 2008-1-29.
                // The covariance matrix is positive semi-definite, but it is not
                // necessarily invertible, since C = X.t() * X. DECOMP_SVD therefore
                // yields the pseudo-inverse whenever Covar is singular.
                cv::invert(Covar, this->m_vvvCVMInverseOfSg[i][j][k], DECOMP_SVD);
            }
        }
    }
}
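A note on the cv::invert call above: because Covar = X.t() * X can be rank-deficient, DECOMP_SVD is the safe choice, since it returns the Moore-Penrose pseudo-inverse (and a return value of 0) when the matrix is singular. A minimal standalone sketch of that behaviour, using a made-up rank-1 matrix rather than model data:

#include <opencv2/core/core.hpp>
using namespace cv;

int main()
{
    // Rank-1 "covariance" C = x * x.t() with x = (1, 2): not invertible in the usual sense.
    Mat_<float> C = (Mat_<float>(2, 2) << 1, 2,
                                          2, 4);
    Mat_<float> Cinv;
    double rcond = invert(C, Cinv, DECOMP_SVD); // rcond == 0 signals a singular input;
                                                // Cinv then holds the pseudo-inverse
    // C * Cinv * C reproduces C up to numerical noise.
    return 0;
}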
static void calcKeyPointProjections( const vector<KeyPoint>& src, const Mat_<double>& H, vector<KeyPoint>& dst )
{
    if(  !src.empty() )
    {
        CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<KeyPoint>::const_iterator srcIt = src.begin();
        vector<KeyPoint>::iterator       dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
        {
            Point2f dstPt = applyHomography(H, srcIt->pt);

            float srcSize2 = srcIt->size * srcIt->size;
            Mat_<double> M(2, 2);
            M(0,0) = M(1,1) = 1./srcSize2;
            M(1,0) = M(0,1) = 0;
            Mat_<double> invM; invert(M, invM);
            Mat_<double> Aff; linearizeHomographyAt(H, srcIt->pt, Aff);
            Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
            Mat_<double> eval; eigen( dstM, eval );
            CV_Assert( eval(0,0) && eval(1,0) );
            float dstSize = (float)pow(1./(eval(0,0)*eval(1,0)), 0.25);

            // TODO: check angle projection
            float srcAngleRad = (float)(srcIt->angle*CV_PI/180);
            Point2f vec1(cos(srcAngleRad), sin(srcAngleRad)), vec2;
            vec2.x = (float)(Aff(0,0)*vec1.x + Aff(0,1)*vec1.y);
            vec2.y = (float)(Aff(1,0)*vec1.x + Aff(1,1)*vec1.y);
            float dstAngleGrad = fastAtan2(vec2.y, vec2.x);

            *dstIt = KeyPoint( dstPt, dstSize, dstAngleGrad, srcIt->response, srcIt->octave, srcIt->class_id );
        }
    }
}
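applyHomography is a helper not shown in this example; it maps a point through H and dehomogenizes. A sketch consistent with how it is used above:

#include <limits>

static Point2f applyHomography( const Mat_<double>& H, const Point2f& pt )
{
    double z = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2);
    if( z != 0 )
    {
        double w = 1./z;
        return Point2f( (float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w),
                        (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w) );
    }
    // Point maps to infinity; return a sentinel value.
    return Point2f( std::numeric_limits<float>::max(), std::numeric_limits<float>::max() );
}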
Example #3
Matx33d FivePoints::getBestCandidate(std::vector< Matx33d > candidates, Mat_<double> HQ1, Mat_<double> HQ2)
{
    Matx33d best_candidate;
    Mat_<double> candidates_error_matrix(HQ1.cols, HQ1.cols);
    double min_candidates_error = 1000;
    double candidate_error;

    // choose best E based on reprojection error
    // e = Q1'*E*Q2
    for(size_t k = 0; k < candidates.size(); ++k)
    {
        candidates_error_matrix = HQ1.t() * Mat_<double>(candidates[k]) * HQ2;

        candidate_error = 0;
        for(int n = 0; n < HQ1.cols; ++n)
            candidate_error += candidates_error_matrix(n,n);

        if( fabs(candidate_error) < min_candidates_error )
        {
            min_candidates_error = fabs(candidate_error);
            best_candidate = candidates[k];
        }
    }

    return best_candidate;
}
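The accumulated diagonal sum in getBestCandidate is simply the trace of Q1'*E*Q2, so the inner loop can also be written with cv::trace; a small equivalent helper (same inputs as above) is sketched here:

#include <cmath>
#include <opencv2/core/core.hpp>
using namespace cv;

// Equivalent to summing candidates_error_matrix(n,n) over n in getBestCandidate.
static double candidateError( const Matx33d& E, const Mat_<double>& HQ1, const Mat_<double>& HQ2 )
{
    Mat_<double> err = HQ1.t() * Mat_<double>(E) * HQ2;
    return std::fabs( trace(err)[0] ); // trace() returns a Scalar; element 0 is the diagonal sum
}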
Example #4
Mat_<double> worldHomToCameraHom(
    Mat_<double> const worldPtsHom,
    Mat_<double> const rotMatrix,
    Mat_<double> const translation)
{
    assert(worldPtsHom.cols == 4);
    assert(rotMatrix.rows == 3);
    assert(rotMatrix.cols == 3);
    assert(translation.rows == 3);
    assert(translation.cols == 1);

    // Convert rotMatrix + translation into a linear transformation in
    // homogeneous coordinates.
    Mat_<double> rigidMotion = Mat_<double>::zeros(4, 4);
    rotMatrix.copyTo(rigidMotion(Range(0, 3), Range(0, 3)));
    translation.copyTo(rigidMotion(Range(0, 3), Range(3, 4)));
    rigidMotion(3, 3) = 1;
    // cout << "rigidMotion: " << rigidMotion << endl;

    // Assuming the camera calibration matrix is the identity.
    // The points are stored one per row, while the projection expects column
    // vectors, so transpose before multiplying and transpose the result back.
    Mat_<double> projection = Mat_<double>::eye(3, 4);
    Mat result = projection * rigidMotion * worldPtsHom.t();
    return result.t();
}
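A quick usage sketch for worldHomToCameraHom (hypothetical values, assuming the function above is in scope): with an identity rotation and zero translation, a world point on the optical axis maps to homogeneous image coordinates (0, 0, Z).

// One homogeneous world point, 5 units in front of the camera.
Mat_<double> worldPtsHom = (Mat_<double>(1, 4) << 0, 0, 5, 1);
Mat_<double> R = Mat_<double>::eye(3, 3);
Mat_<double> tvec = (Mat_<double>(3, 1) << 0, 0, 0);
Mat_<double> camPtsHom = worldHomToCameraHom(worldPtsHom, R, tvec);
// camPtsHom is 1x3 and equals (0, 0, 5); after dehomogenization the point lands on the principal point.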
Point3f GetPupilPosition(Mat_<double> eyeLdmks3d){
	
	eyeLdmks3d = eyeLdmks3d.t();

	Mat_<double> irisLdmks3d = eyeLdmks3d.rowRange(0,8);

	Point3f p (mean(irisLdmks3d.col(0))[0], mean(irisLdmks3d.col(1))[0], mean(irisLdmks3d.col(2))[0]);
	return p;
}
Example #6
void EllipticKeyPoint::calcProjection( const Mat_<double>& H, EllipticKeyPoint& projection ) const
{
    Point2f dstCenter = applyHomography(H, center);

    Mat_<double> invM; invert(getSecondMomentsMatrix(), invM);
    Mat_<double> Aff; linearizeHomographyAt(H, center, Aff);
    Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);

    projection = EllipticKeyPoint( dstCenter, Scalar(dstM(0,0), dstM(0,1), dstM(1,1)) );
}
void *processFrame (void *arg) {

    int thread_id = *(int *)arg;
    while(do_work()) {

        // pthread_cond_wait requires the associated mutex to be held by the caller
        pthread_mutex_lock(&thread_cond_mutexes[thread_id]);
        pthread_cond_wait(&thread_cond[thread_id], &thread_cond_mutexes[thread_id]);
        pthread_mutex_unlock(&thread_cond_mutexes[thread_id]);
        setThreadBusy (thread_id);

        Mat frame = thread_frames[thread_id];
        Mat_<double> imagePts;
        Mat_<double> simplePose;

        imagePts = detectCorners(frame);
        bool success = (imagePts.rows > 0);
        if(success) {
            imagePts = calibrateImagePoints(imagePts);
            std_msgs::Float64MultiArray cornersMsg = makeCornersMsg(imagePts);
            cornersPub.publish(cornersMsg);
            simplePose = estimatePose(imagePts);

            std::cout << simplePose.t() << std::endl;

            std_msgs::Float64MultiArray simplePoseMsg = \
                makeSimplePoseMsg(simplePose);
            simplePosePub.publish(simplePoseMsg);
//            ROS_INFO("Tick.");
        } else {
            // Don't publish anything this tick.
            ROS_INFO("Could not detect all corners!");
        }

        frame.release();
        setThreadIdle (thread_id);
    }
    ROS_INFO("Thread %d exiting.", thread_id);    pthread_exit(NULL);
    
}
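processFrame above blocks on a condition variable until a frame is handed to it; the dispatching side is not shown. A hedged sketch of what it could look like (thread_frames, thread_cond and thread_cond_mutexes are the globals used above; isThreadIdle is a hypothetical counterpart of setThreadBusy/setThreadIdle):

void dispatchFrame(const Mat& frame, int thread_id)
{
    if (!isThreadIdle(thread_id))
        return;                                       // worker still busy: drop this frame

    pthread_mutex_lock(&thread_cond_mutexes[thread_id]);
    thread_frames[thread_id] = frame.clone();         // hand the worker its own deep copy
    pthread_cond_signal(&thread_cond[thread_id]);     // wake the thread waiting in processFrame
    pthread_mutex_unlock(&thread_cond_mutexes[thread_id]);
}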
Example #8
void findEssentialMatrix(MFramePair& pair, Mat_<double> K) {
    vector<Point2f> k1, k2,n1,n2;
    vector<int> usedIndex1,usedIndex2;
    vector<uchar> status(pair.matchPts1.size());
    Mat F = findFundamentalMat(pair.matchPts1, pair.matchPts2, CV_FM_RANSAC, 0.2, 0.9,
                               status);
    Mat_<double> E;
    E = K.t() * F * K; //according to HZ (9.12)
    for (unsigned int i = 0; i < status.size(); i++) { // queryIdx is the "left" image
        if (status[i]) {
            usedIndex1.push_back(pair.matchedIndex1[i]);
            k1.push_back(pair.matchPts1[i]);
            usedIndex2.push_back(pair.matchedIndex2[i]);
            k2.push_back(pair.matchPts2[i]);
        }
    }
    correctMatches(F,k1,k2,n1,n2);
    pair.matchPts1 = n1;
    pair.matchPts2 = n2;
    pair.matchedIndex1=usedIndex1;
    pair.matchedIndex2=usedIndex2;
    pair.F = F;
    pair.E = E;
}
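Since E = K' * F * K is only as good as the estimated F, a cheap sanity check is that a valid essential matrix has two equal non-zero singular values and one (near-)zero singular value. A hedged sketch of such a check, run after calling findEssentialMatrix(pair, K):

// Sanity check on E: its singular values should be approximately (s, s, 0).
cv::SVD svd(pair.E, cv::SVD::NO_UV);
double s0 = svd.w.at<double>(0), s1 = svd.w.at<double>(1), s2 = svd.w.at<double>(2);
bool looksLikeEssential = (s2 < 1e-3 * s0) && (std::fabs(s0 - s1) < 0.05 * s0); // loose tolerances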
void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const CLMTracker::CLM& clm_model, double timestamp_seconds, bool online, bool visualise)
{
	// Check if a reset is needed first (TODO same person no reset)
	//if(face_bounding_box.area() > 0)
	//{
	//	Rect_<double> new_bounding_box = clm.GetBoundingBox();

	//	// If the box overlaps do not need a reset
	//	double intersection_area = (face_bounding_box & new_bounding_box).area();
	//	double union_area = face_bounding_box.area() + new_bounding_box.area() - 2 * intersection_area;

	//	// If the model is already tracking what we're detecting ignore the detection, this is determined by amount of overlap
	//	if( intersection_area/union_area < 0.5)
	//	{
	//		this->Reset();
	//	}

	//	face_bounding_box = new_bounding_box;
	//}
	//if(!clm.detection_success)
	//{
	//	this->Reset();
	//}

	frames_tracking++;

	// First align the face if tracking was successful
	if(clm_model.detection_success)
	{
		AlignFaceMask(aligned_face, frame, clm_model, triangulation, true, align_scale, align_width, align_height);
	}
	else
	{
		aligned_face = Mat(align_height, align_width, CV_8UC3);
		aligned_face.setTo(0);
	}

	if(aligned_face.channels() == 3)
	{
		cvtColor(aligned_face, aligned_face_grayscale, CV_BGR2GRAY);
	}
	else
	{
		aligned_face_grayscale = aligned_face.clone();
	}

	// Extract HOG descriptor from the frame and convert it to a useable format
	Mat_<double> hog_descriptor;
	Extract_FHOG_descriptor(hog_descriptor, aligned_face, this->num_hog_rows, this->num_hog_cols);

	// Store the descriptor
	hog_desc_frame = hog_descriptor;

	Vec3d curr_orient(clm_model.params_global[1], clm_model.params_global[2], clm_model.params_global[3]);
	int orientation_to_use = GetViewId(this->head_orientations, curr_orient);

	// Only update the running median if predictions are not high
	// That is don't update it when the face is expressive (just retrieve it)
	bool update_median = true;

	// TODO test if this would be useful or not
	//if(!this->AU_predictions.empty())
	//{
	//	for(size_t i = 0; i < this->AU_predictions.size(); ++i)
	//	{
	//		if(this->AU_predictions[i].second > 1)
	//		{
	//			update_median = false;				
	//			break;
	//		}
	//	}
	//}

	update_median = update_median && clm_model.detection_success;

	// A small speedup
	if(frames_tracking % 2 == 1)
	{
		UpdateRunningMedian(this->hog_desc_hist[orientation_to_use], this->hog_hist_sum[orientation_to_use], this->hog_desc_median, hog_descriptor, update_median, this->num_bins_hog, this->min_val_hog, this->max_val_hog);
	}	
	// Geom descriptor and its median
	geom_descriptor_frame = clm_model.params_local.t();
	
	if(!clm_model.detection_success)
	{
		geom_descriptor_frame.setTo(0);
	}

	// Stack with the actual feature point locations (without mean)
	Mat_<double> locs = clm_model.pdm.princ_comp * geom_descriptor_frame.t();
	
	cv::hconcat(locs.t(), geom_descriptor_frame.clone(), geom_descriptor_frame);
	
	// A small speedup
	if(frames_tracking % 2 == 1)
	{
		UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
	}

	// First convert the face image to double representation as a row vector
	Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
	Mat_<double> aligned_face_cols_double;
	aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
	
	// TODO get rid of this completely as it takes too long?
	//UpdateRunningMedian(this->face_image_hist[orientation_to_use], this->face_image_hist_sum[orientation_to_use], this->face_image_median, aligned_face_cols_double, update_median, 256, 0, 255);

	// Visualising the median HOG
	if(visualise)
	{
		FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_visualisation);
	}

	// Perform AU prediction	
	AU_predictions_reg = PredictCurrentAUs(orientation_to_use);

	std::vector<std::pair<std::string, double>> AU_predictions_reg_corrected;
	if(online)
	{
		AU_predictions_reg_corrected = CorrectOnlineAUs(AU_predictions_reg, orientation_to_use, true, false, clm_model.detection_success);
	}

	// Keep only closer to in-plane faces
	double angle_norm = cv::sqrt(clm_model.params_global[2] * clm_model.params_global[2] + clm_model.params_global[3] * clm_model.params_global[3]);

	// Add the reg predictions to the historic data
	for (size_t au = 0; au < AU_predictions_reg.size(); ++au)
	{

		// Find the appropriate AU (if not found add it)		
		// Only add if the detection was successful and not too out of plane
		if(clm_model.detection_success && angle_norm < 0.4)
		{
			AU_predictions_reg_all_hist[AU_predictions_reg[au].first].push_back(AU_predictions_reg[au].second);
		}
		else
		{
			AU_predictions_reg_all_hist[AU_predictions_reg[au].first].push_back(-100.0);
		}
	}
	
	AU_predictions_class = PredictCurrentAUsClass(orientation_to_use);

	for (size_t au = 0; au < AU_predictions_class.size(); ++au)
	{

		// Find the appropriate AU (if not found add it)		
		// Only add if the detection was successful and not too out of plane
		if(clm_model.detection_success && angle_norm < 0.4)
		{
			AU_predictions_class_all_hist[AU_predictions_class[au].first].push_back(AU_predictions_class[au].second);
		}
		else
		{
			AU_predictions_class_all_hist[AU_predictions_class[au].first].push_back(-100.0);
		}
	}
	

	if(online)
	{
		AU_predictions_reg = AU_predictions_reg_corrected;
	}

	this->current_time_seconds = timestamp_seconds;

	view_used = orientation_to_use;
			
	bool success = clm_model.detection_success && angle_norm < 0.4;

	confidences.push_back(clm_model.detection_certainty);
	valid_preds.push_back(success);
}
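UpdateRunningMedian is not shown in this example; from its arguments (a histogram per view, a frame counter, a bin count and a value range) it appears to maintain a per-dimension histogram from which the median is read back. The sketch below is an assumption-based reconstruction of that idea, not the FaceAnalyser implementation:

// Hedged sketch: per-dimension histogram median over [min_val, max_val] with num_bins bins.
// hist is (num_dims x num_bins), hist_count is the number of frames accumulated so far.
static void UpdateRunningMedianSketch(Mat_<int>& hist, int& hist_count, Mat_<double>& median,
                                      const Mat_<double>& descriptor, bool update,
                                      int num_bins, double min_val, double max_val)
{
    int num_dims = descriptor.cols;
    if (hist.empty())
    {
        hist = Mat_<int>::zeros(num_dims, num_bins);
        median = Mat_<double>::zeros(1, num_dims);
    }
    double bin_width = (max_val - min_val) / num_bins;

    if (update)
    {
        for (int d = 0; d < num_dims; ++d)
        {
            int bin = cvFloor((descriptor(0, d) - min_val) / bin_width);
            bin = std::min(std::max(bin, 0), num_bins - 1); // clamp out-of-range values
            hist(d, bin)++;
        }
        hist_count++;
    }

    // Read the median of every dimension back from its cumulative histogram.
    for (int d = 0; d < num_dims; ++d)
    {
        int cumulative = 0, half = hist_count / 2;
        for (int b = 0; b < num_bins; ++b)
        {
            cumulative += hist(d, b);
            if (cumulative > half)
            {
                median(0, d) = min_val + (b + 0.5) * bin_width;
                break;
            }
        }
    }
}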
void computePoseDifference(Mat img1, Mat img2, CommandArgs args, Mat k, Mat& dist_coefficients, double& worldScale, Mat& R, Mat& t, Mat& img_matches)
{
   cout << "%===============================================%" << endl;

   Mat camera_matrix = k.clone();
   if (args.resize_factor > 1) 
   {
      resize(img1, img1, Size(img1.cols / args.resize_factor,
               img1.rows / args.resize_factor)); // make smaller for performance and displayability
      resize(img2, img2, Size(img2.cols / args.resize_factor,
               img2.rows / args.resize_factor));
      // scale matrix down according to changed resolution
      camera_matrix = camera_matrix / args.resize_factor;
      camera_matrix.at<double>(2,2) = 1;
   }

   // It is assumed the camera has been calibrated in landscape mode, so undistortion
   // must also be performed in landscape orientation, or the camera matrix must be
   // modified (fx,fy and cx,cy need to be exchanged).
   Mat K1 = camera_matrix.clone(); // deep copies: the element swaps below must not
   Mat K2 = camera_matrix.clone(); // alias each other or camera_matrix
   if (img1.rows > img1.cols)
   {
      swap(K1.at<double>(0,0), K1.at<double>(1,1));
      swap(K1.at<double>(0,2), K1.at<double>(1,2));
   }
   if (img2.rows > img2.cols)
   {
      swap(K2.at<double>(0,0), K2.at<double>(1,1));
      swap(K2.at<double>(0,2), K2.at<double>(1,2));
   }

   // Feature detection + extraction
   vector<KeyPoint> KeyPoints_1, KeyPoints_2;
   Mat descriptors_1, descriptors_2;

   Ptr<Feature2D> feat_detector;
   if (args.detector == DETECTOR_KAZE) 
   {
      feat_detector = AKAZE::create(args.detector_data.upright ? AKAZE::DESCRIPTOR_MLDB_UPRIGHT : AKAZE::DESCRIPTOR_MLDB, 
            args.detector_data.descriptor_size,
            args.detector_data.descriptor_channels,
            args.detector_data.threshold,
            args.detector_data.nOctaves,
            args.detector_data.nOctaveLayersAkaze);

   } else if (args.detector == DETECTOR_SURF)
   {
      feat_detector = xfeatures2d::SURF::create(args.detector_data.minHessian, 
            args.detector_data.nOctaves, args.detector_data.nOctaveLayersAkaze, args.detector_data.extended, args.detector_data.upright);
   } else if (args.detector == DETECTOR_SIFT)
   {
      feat_detector = xfeatures2d::SIFT::create(args.detector_data.nFeatures, 
            args.detector_data.nOctaveLayersSift, args.detector_data.contrastThreshold, args.detector_data.sigma);
   }

   feat_detector->detectAndCompute(img1, noArray(), KeyPoints_1, descriptors_1);
   feat_detector->detectAndCompute(img2, noArray(), KeyPoints_2, descriptors_2);

   cout << "Number of feature points (img1, img2): " << "(" << KeyPoints_1.size() << ", " << KeyPoints_2.size() << ")" << endl;

   // Find correspondences
   BFMatcher matcher;
   vector<DMatch> matches;
   if (args.use_ratio_test) 
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, false);
      else matcher = BFMatcher(NORM_L2, false);

      vector<vector<DMatch>> match_candidates;
      const float ratio = args.ratio;
      matcher.knnMatch(descriptors_1, descriptors_2, match_candidates, 2);
      for (size_t i = 0; i < match_candidates.size(); i++)
         if (match_candidates[i][0].distance < ratio * match_candidates[i][1].distance)
            matches.push_back(match_candidates[i][0]);

      cout << "Number of matches passing ratio test: " << matches.size() << endl;

   } else
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, true);
      else matcher = BFMatcher(NORM_L2, true);
      matcher.match(descriptors_1, descriptors_2, matches);
      cout << "Number of matching feature points: " << matches.size() << endl;
   }


   // Convert correspondences to vectors
   vector<Point2f>imgpts1,imgpts2;

   for(unsigned int i = 0; i < matches.size(); i++) 
   {
      imgpts1.push_back(KeyPoints_1[matches[i].queryIdx].pt); 
      imgpts2.push_back(KeyPoints_2[matches[i].trainIdx].pt); 
   }

   Mat mask; // inlier mask
   if (args.undistort) 
   {
      undistortPoints(imgpts1, imgpts1, K1, dist_coefficients, noArray(), K1);
      undistortPoints(imgpts2, imgpts2, K2, dist_coefficients, noArray(), K2);
   } 

   double focal = camera_matrix.at<double>(0,0);
   Point2d principalPoint(camera_matrix.at<double>(0,2),camera_matrix.at<double>(1,2));

   Mat E = findEssentialMat(imgpts1, imgpts2, focal, principalPoint, RANSAC, 0.999, 1, mask);
   /* Mat F = camera_matrix.t().inv() * E * camera_matrix.inv(); */
   Mat F = findFundamentalMat(imgpts1, imgpts2, CV_FM_RANSAC);

   correctMatches(F, imgpts1, imgpts2, imgpts1, imgpts2);
   cout << "Reprojection error:\n " << computeReprojectionError(imgpts1, imgpts2, mask, F) << endl;

   int inliers = recoverPose(E, imgpts1, imgpts2, R, t, focal, principalPoint, mask);

   cout << "Matches used for pose recovery:\n " << inliers << endl;

   /* Mat R1, R2, ProjMat1, ProjMat2, Q; */
   /* stereoRectify(camera_matrix, dist_coefficients, camera_matrix, dist_coefficients, img1.size(), R, t, R1, R2, ProjMat1, ProjMat2, Q); */
   /* cout << "P1=" << ProjMat1 << endl; */
   /* cout << "P2=" << ProjMat2 << endl; */
   /* cout << "Q=" << Q << endl; */

   Mat mtxR, mtxQ;
   Mat Qx, Qy, Qz;
   Vec3d angles = RQDecomp3x3(R, mtxR, mtxQ, Qx, Qy, Qz);
   /* cout << "Qx:\n " << Qx << endl; */
   /* cout << "Qy:\n " << Qy << endl; */
   /* cout << "Qz:\n " << Qz << endl; */
   cout << "Translation:\n " << t.t() << endl;
   cout << "Euler angles [x y z] in degrees:\n " << angles.t() << endl;

   if (args.epilines)
   {
      drawEpilines(Mat(imgpts1), 1, F, img2);
      drawEpilines(Mat(imgpts2), 2, F, img1);
   }

   drawMatches(img1, KeyPoints_1, img2, KeyPoints_2, // draw only inliers given by mask
         matches, img_matches, Scalar::all(-1), Scalar::all(-1), mask);

   vector<Point2f> imgpts1_masked, imgpts2_masked;
   for (int i = 0; i < imgpts1.size(); i++) 
   {
      if (mask.at<uchar>(i,0) == 1) 
      {
         imgpts1_masked.push_back(imgpts1[i]);
         imgpts2_masked.push_back(imgpts2[i]);
      }
   }

   Mat pnts4D;
   Mat P1 = camera_matrix * Mat::eye(3, 4, CV_64FC1), P2;
   Mat p2[2] = { R, t }; 
   hconcat(p2, 2, P2);
   P2 = camera_matrix * P2;

#define USE_OPENCV_TRIANGULATION
#ifndef USE_OPENCV_TRIANGULATION // strangely, both methods yield identical results
   vector<Point3d> homogPoints1, homogPoints2;
   for (int i = 0; i < imgpts1_masked.size(); i++) 
   {
      Point2f currentPoint1 = imgpts1_masked[i];
      homogPoints1.push_back(Point3d(currentPoint1.x, currentPoint1.y, 1));
      Point2f currentPoint2 = imgpts2_masked[i];
      homogPoints2.push_back(Point3d(currentPoint2.x, currentPoint2.y, 1));
   }

   Mat dehomogenized(imgpts1_masked.size(), 3, CV_64FC1);
   for (int i = 0; i < imgpts1_masked.size(); i++) 
   {
      Mat_<double> triangulatedPoint = IterativeLinearLSTriangulation(homogPoints1[i], P1, homogPoints2[i], P2);
      Mat r = triangulatedPoint.t();
      r.colRange(0,3).copyTo(dehomogenized.row(i)); // plain assignment to dehomogenized.row(i) would only rebind a temporary header, so copyTo is needed to write the data
   }
#else
   triangulatePoints(P1, P2, imgpts1_masked, imgpts2_masked, pnts4D);
   pnts4D = pnts4D.t();
   Mat dehomogenized;
   convertPointsFromHomogeneous(pnts4D, dehomogenized);
   dehomogenized = dehomogenized.reshape(1); // instead of 3 channels and 1 col, we want 1 channel and 3 cols
#endif


   double mDist = 0;
   int n = 0;
   int pos = 0, neg = 0;

   /* Write ply file header */
   ofstream ply_file("points.ply", ios_base::trunc);
   ply_file << 
      "ply\n"
      "format ascii 1.0\n"
      "element vertex " << dehomogenized.rows << "\n"
      "property float x\n"
      "property float y\n"
      "property float z\n"
      "property uchar red\n"
      "property uchar green\n"
      "property uchar blue\n"
      "end_header" << endl;

   Mat_<double> row;
   for (int i = 0; i < dehomogenized.rows; i++) 
   {
      row = dehomogenized.row(i);
      double d = row(2);
      if (d > 0) 
      {
         pos++;
         mDist += norm(row);
         n++;
         /* float startx=imgpts1_masked[i].x - 1, starty=imgpts1_masked[i].y - 1, endx=imgpts1_masked[i].x + 1, endy=imgpts1_masked[i].y + 1; */
         /* cout << "startx,endx = " << startx << "," << endx << endl; */
         /* cout << "starty,endy = " << starty << "," << endy << endl; */
         Vec3b rgb = img1.at<Vec3b>(cvRound(imgpts1_masked[i].y), cvRound(imgpts1_masked[i].x)); // at<> takes (row, col), i.e. (y, x)
         ply_file << row(0) << " " << row(1) << " " << row(2) << " " << (int)rgb[2] << " " << (int)rgb[1] << " " << (int)rgb[0] << "\n";
      } else
      {
         neg++;
         ply_file << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << "\n"; 
      }
   }
   ply_file.close();
   mDist /= n;
   worldScale = mDist;
   cout << "Mean distance of " << n << " points to camera:\n " << mDist << " (dehomogenized)" << endl;
   cout << "pos=" << pos << ", neg=" << neg << endl;


   /* char filename[100]; */
   /* sprintf(filename, "mat_1%d", i+1); */

   /* Ptr<Formatter> formatter = Formatter::get(Formatter::FMT_CSV); */
   /* Ptr<Formatted> formatted = formatter->format(dehomogenized); */
   /* ofstream file(filename, ios_base::trunc); */
   /* file << formatted << endl; */

   /* Removed until cmake has been fathomed */
   /* vector< Point3d > points3D; */
   /* vector< vector< Point2d > > pointsImg; */
   /* int NPOINTS=dehomogenized.rows; // number of 3d points */
   /* int NCAMS=2; // number of cameras */

   /* points3D.resize(NPOINTS); */
   /* for (int i = 0; i < NPOINTS; i++) */ 
   /* { */
   /*    points3D[i] = Point3d(dehomogenized.at<double>(i,0), */
   /*          dehomogenized.at<double>(i,1), */
   /*          dehomogenized.at<double>(i,2) */
   /*          ); */
   /* } */
   /* // fill image projections */
   /* vector<vector<int> > visibility(2, vector<int>(NPOINTS, 1)); */
   /* vector<Mat> camera_matrices(2, camera_matrix); */
   /* vector<Mat> Rs(2); */
   /* Rodrigues(Mat::eye(3, 3, CV_64FC1), Rs[0]); */
   /* Rodrigues(R, Rs[0]); */
   /* vector<Mat> Ts = { Mat::zeros(3,1, CV_64FC1), t }; */
   /* vector<Mat> dist_coefficientss(2, dist_coefficients); */

   /* pointsImg.resize(NCAMS); */
   /* for(int i=0; i<NCAMS; i++) pointsImg[i].resize(NPOINTS); */
   /* for (int i = 0; i < NPOINTS; i++) */ 
   /* { */
   /*    pointsImg[0][i] = Point2d(imgpts1_masked[i].x, imgpts1_masked[i].y); */
   /*    pointsImg[1][i] = Point2d(imgpts2_masked[i].x, imgpts2_masked[i].y); */
   /* } */
   /*  cvsba::Sba sba; */
   /*   sba.run(points3D, pointsImg, visibility, camera_matrices, Rs, Ts, dist_coefficientss); */

   /*   cout<<"Initial error="<<sba.getInitialReprjError()<<". "<< */
   /*              "Final error="<<sba.getFinalReprjError()<<endl; */

   cout << "%===============================================%" << endl;
}
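computeReprojectionError is a local helper that isn't included here; one plausible definition, given that it takes the two point sets, the inlier mask and F, is the mean symmetric distance of inlier correspondences to their epipolar lines. A sketch under that assumption (not necessarily the author's implementation):

static double computeReprojectionErrorSketch(const vector<Point2f>& pts1, const vector<Point2f>& pts2,
                                             const Mat& inlier_mask, const Mat& F)
{
   vector<Vec3f> lines1, lines2;
   computeCorrespondEpilines(pts2, 2, F, lines1); // epilines in image 1 induced by points of image 2
   computeCorrespondEpilines(pts1, 1, F, lines2); // epilines in image 2 induced by points of image 1

   double err = 0;
   int n = 0;
   for (size_t i = 0; i < pts1.size(); ++i)
   {
      if (!inlier_mask.empty() && inlier_mask.at<uchar>((int)i, 0) == 0)
         continue;
      const Vec3f& l1 = lines1[i];
      const Vec3f& l2 = lines2[i];
      double d1 = std::fabs(l1[0]*pts1[i].x + l1[1]*pts1[i].y + l1[2]) / std::sqrt(l1[0]*l1[0] + l1[1]*l1[1]);
      double d2 = std::fabs(l2[0]*pts2[i].x + l2[1]*pts2[i].y + l2[2]) / std::sqrt(l2[0]*l2[0] + l2[1]*l2[1]);
      err += d1 + d2;
      n++;
   }
   return n > 0 ? err / n : 0;
}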
// Need to move this all to opengl
void DrawBox(Mat image, Vec6d pose, Scalar color, int thickness, float fx, float fy, float cx, float cy)
{
	float boxVerts[] = {-1, 1, -1,
						1, 1, -1,
						1, 1, 1,
						-1, 1, 1,
						1, -1, 1,
						1, -1, -1,
						-1, -1, -1,
						-1, -1, 1};
	Mat_<float> box = Mat(8, 3, CV_32F, boxVerts).clone() * 100;


	Matx33f rot = Euler2RotMat(Vec3d(pose[3], pose[4], pose[5]));
	Mat_<float> rotBox;
	
	Mat((Mat(rot) * box.t())).copyTo(rotBox);
	rotBox = rotBox.t();

	rotBox.col(0) = rotBox.col(0) + pose[0];
	rotBox.col(1) = rotBox.col(1) + pose[1];
	rotBox.col(2) = rotBox.col(2) + pose[2];

	// draw the lines: project the rotated box and connect its 12 edges
	Mat_<float> rotBoxProj;
	Project(rotBoxProj, rotBox, image.size(), fx, fy, cx, cy);

	// edges of the box as index pairs into the 8 projected vertices
	int edges[12][2] = { {0,1}, {1,2}, {2,3}, {0,3}, {2,4}, {1,5},
	                     {0,6}, {3,7}, {6,5}, {5,4}, {4,7}, {7,6} };

	Mat begin;
	Mat end;
	for (int e = 0; e < 12; ++e)
	{
		rotBoxProj.row(edges[e][0]).copyTo(begin);
		rotBoxProj.row(edges[e][1]).copyTo(end);
		cv::line(image,
			Point((int)begin.at<float>(0), (int)begin.at<float>(1)),
			Point((int)end.at<float>(0), (int)end.at<float>(1)),
			color, thickness);
	}

}
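Project is an external helper (from the tracker utilities); for this drawing code it only needs to be a pinhole projection of each 3D row onto the image plane. A sketch of that behaviour (the real routine may differ, e.g. in how it handles points behind the camera):

// Hedged sketch of Project: pinhole-project N x 3 points (one per row) to N x 2 image points.
void ProjectSketch(Mat_<float>& dest, const Mat_<float>& points3D, const Size& /*imgSize*/,
                   float fx, float fy, float cx, float cy)
{
	dest = Mat_<float>(points3D.rows, 2);
	for (int i = 0; i < points3D.rows; ++i)
	{
		float X = points3D(i, 0), Y = points3D(i, 1), Z = points3D(i, 2);
		dest(i, 0) = fx * X / Z + cx; // u
		dest(i, 1) = fy * Y / Z + cy; // v
	}
}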
// main function.
int main(int argc, char** argv) {

	// image processing
	Mat imgOriginal;
	Mat imgGrayscale;
	Mat imgThresholded;
	Mat imgFeature;
	Mat imgTracked;

	// image processing
	while (true) {

		// load the image
		imgOriginal = imread(argv[1], CV_LOAD_IMAGE_COLOR);
		imgFeature = imgOriginal.clone(); // deep copy so drawing features does not alter imgOriginal
		if(! imgOriginal.data )
		{
			cout <<  "Could not open or find the image" << std::endl ;
			return -1;
		}

		// threshold original image.
		ThresholdImage(imgOriginal, imgGrayscale, imgThresholded);

		bool method = false;	// false selects the SVD method below; set to true for simple geometry.
		if (method) {
			/* METHOD 1: SIMPLE GEOMETRY */

			// get image points
			Mat_<double> imagePts = getFeatureVector(imgThresholded, imgFeature);
			imagePts = calibrateImagePoints(imagePts); // avoid extracting the features twice
//			cout << "imgPts = " << endl << imagePts << endl << endl;

			// get world points
			Mat_<double> worldPts = getWorldPts();
//			cout << "worldPts = " << endl << worldPts << endl << endl;

			// estimate pose
			Mat_<double> simplePose = estimatePose_GEO(imagePts, worldPts);
			cout << "simplePose = " << endl << simplePose.t() << endl;

		} else {
			/* METHOD 2: SINGULAR VALUE DECOMPOSITION */

			// get image points
			Mat_<double> imagePts = calibrateImagePoints(getFeatureVector(imgThresholded, imgFeature));
//			cout << "imgPts = " << endl << imagePts << endl << endl;

			// get world points
			Mat_<double> worldPts = getWorldPts();

//			cout << "imgPts = " << endl << imagePts*f << endl << endl;
//			cout << "worldPts = " << endl << worldPts << endl << endl;


			// estimate pose
			Mat_<double> simplePose = estimatePose_SVD(imagePts, worldPts);
			cout << "simplePose = " << endl << simplePose.t() << endl;

		}


		// show images.
//		imshow("Original Image", imgOriginal); //show the original image
//		imshow("Grayscale Image", imgGrayscale); //show the original image
//		imshow("Thresholded Image", imgThresholded); //show the thresholded image
		imshow("Featured Image", imgFeature); //show the thresholded image

		// Press ESC to exit.
		if (waitKey(30) == 27) // wait up to 30 ms for a key press; 27 == ESC
		{
			cout << "esc key is pressed by user" << endl;
			break;
		}
	}
	return 0;
}