void CRenderCenterDlg::OnBnClickedRate()
{
	// TODO: Add your control notification handler code here
	if(img1.size()!=img2.size())
	{
		MessageBox(TEXT("Two images are not the same size!"),TEXT("error"),MB_OK);
		return;
	}
	RateBlending lp;
	Mat_<Vec3f> l; img1.convertTo(l,CV_32F,1.0/255.0); // Vec3f means three channels, i.e. l[row][column][depth]
	Mat_<Vec3f> r; img2.convertTo(r,CV_32F,1.0/255.0);
	Mat_<float> m(l.rows,l.cols,0.0);
	m(Range::all(),Range(0,m.cols/2)) = 1.0; 
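	// The mask is 1.0 on the left half and 0.0 on the right half, so the blend
	// presumably takes img1 on the left of the seam and img2 on the right.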
	Mat_<Vec3f> blend =lp.RateBlend(l, r, m);
	blend.convertTo(imgfusion,CV_8UC3,255);

	CWnd *pWnd=GetDlgItem(IDC_IMGFUSION);
	CDC *pDC=pWnd->GetDC();
	HDC hDC=pDC->GetSafeHdc();
	IplImage img=imgfusion;
	CvvImage cimg;
	cimg.CopyOf(&img);
	CRect rect;
	GetDlgItem(IDC_IMGFUSION)->GetClientRect(&rect);
	cimg.DrawToHDC(hDC,&rect);
}
Mat_< float > Saliency::saliency( const Mat_< Vec3b >& im ) const {
	// Convert the image to the lab space
	Mat_<Vec3f> rgbim, labim;
	im.convertTo( rgbim, CV_32F, 1.0/255. );
	cvtColor( rgbim, labim, CV_BGR2Lab );
	
	// Superpixel superpixel_( 300, 50.0 );
	// Do the abstraction
	Mat_<int> segmentation = superpixel_.segment( labim );
	std::vector< SuperpixelStatistic > stat = superpixel_.stat( labim, im, segmentation );
	
	// Compute the uniqueness
	std::vector<float> unique( stat.size(), 1 );
	if (settings_.uniqueness_) {
		if (settings_.filter_uniqueness_)
			unique = uniquenessFilter( stat );
		else
			unique = uniqueness( stat );
	}
	
	// Compute the distribution
	std::vector<float> dist( stat.size(), 0 );
	if (settings_.distribution_) {
		if (settings_.filter_distribution_)
			dist = distributionFilter( stat );
		else
			dist = distribution( stat );
	}
	
	// Combine the two measures
	std::vector<float> sp_saliency( stat.size() );
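	// i.e. saliency_i = uniqueness_i * exp( -k * distribution_i ):
	// a superpixel is salient if its colour is unique and not spatially widespread.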
	for( int i=0; i<stat.size(); i++ )
		sp_saliency[i] = unique[i] * exp( - settings_.k_ * dist[i] );
	
	// Upsampling
	Mat_<float> r;
	if (settings_.upsample_)
		r = assignFilter( im, segmentation, stat, sp_saliency );
	else
		r = assign( segmentation, sp_saliency );
	
	// Rescale the saliency to [0..1]
	double mn, mx;
	minMaxLoc( r, &mn, & mx );
	r = (r - mn) / (mx - mn);
	
	// Scale the saliency values up until their sum reaches the minimal threshold
	double m_sal = settings_.min_saliency_ * r.size().area();
	for( float sm = sum( r )[0]; sm < m_sal; sm = sum( r )[0] )
		r =  min( r*m_sal/sm, 1.0f );
	
	return r;
}
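// Minimal usage sketch (not part of the original example): how saliency() might be
// called end-to-end. It assumes Saliency is default-constructible; adjust the
// construction to whatever Settings type your build provides.
static void saliencyDemo( const std::string& in_path, const std::string& out_path )
{
	Saliency sal;                               // assumed default construction
	Mat_<Vec3b> im = imread( in_path );         // 8-bit BGR input
	if( im.empty() ) return;
	Mat_<float> sal_map = sal.saliency( im );   // per-pixel saliency in [0..1]
	Mat_<uchar> sal8;
	sal_map.convertTo( sal8, CV_8U, 255.0 );    // rescale to 8-bit for saving
	imwrite( out_path, sal8 );
}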
Example #3
void opticalFlow::opticalFlowRefine(Mat_<Vec2f> &flow_in, Mat_<uchar> &occMap,const Mat_<Vec3b> &weightColorImg, Mat_<Vec2f> &flow_refined)
{
	Mat_<float> flow_in_single[2];
	split(flow_in,flow_in_single);
	Mat_<float> flow_out_single[2];
	Mat_<float> occ_fgs;
	occMap.convertTo(occ_fgs,CV_32FC1);
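	// Normalised (confidence-weighted) filtering: multiply the flow by the occlusion
	// confidence, smooth flow and confidence with the same guided filter (FGS), then
	// divide the smoothed flow by the smoothed confidence so that occluded pixels are
	// filled in from reliable neighbours.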
	multiply(flow_in_single[0],occ_fgs,flow_in_single[0]);
	multiply(flow_in_single[1],occ_fgs,flow_in_single[1]);
	FGS(flow_in_single[0],weightColorImg,flow_out_single[0], 0.01, 100);
	FGS(flow_in_single[1],weightColorImg,flow_out_single[1], 0.01, 100);
	FGS(occMap,weightColorImg,occ_fgs, 0.01, 100);
	divide(flow_out_single[0],occ_fgs,flow_out_single[0]);
	divide(flow_out_single[1],occ_fgs,flow_out_single[1]);

	merge(flow_out_single,2,flow_refined);
}
void PatchGenerator::warpWholeImage(const Mat& image, Mat& matT, Mat& buf,
                                    Mat& warped, int border, RNG& rng) const
{
    Mat_<double> T = matT;
    Rect roi(INT_MAX, INT_MAX, INT_MIN, INT_MIN);

    for( int k = 0; k < 4; k++ )
    {
        Point2f pt0, pt1;
        pt0.x = (float)(k == 0 || k == 3 ? 0 : image.cols);
        pt0.y = (float)(k < 2 ? 0 : image.rows);
        pt1.x = (float)(T(0,0)*pt0.x + T(0,1)*pt0.y + T(0,2));
        pt1.y = (float)(T(1,0)*pt0.x + T(1,1)*pt0.y + T(1,2));

        roi.x = std::min(roi.x, cvFloor(pt1.x));
        roi.y = std::min(roi.y, cvFloor(pt1.y));
        roi.width = std::max(roi.width, cvCeil(pt1.x));
        roi.height = std::max(roi.height, cvCeil(pt1.y));
    }
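    // At this point roi.x/roi.y hold the minimum and roi.width/roi.height the
    // maximum of the warped corner coordinates; convert the maxima below into an
    // actual width and height.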

    roi.width -= roi.x - 1;
    roi.height -= roi.y - 1;
    int dx = border - roi.x;
    int dy = border - roi.y;

    if( (roi.width+border*2)*(roi.height+border*2) > buf.cols )
        buf.create(1, (roi.width+border*2)*(roi.height+border*2), image.type());

    warped = Mat(roi.height + border*2, roi.width + border*2,
                 image.type(), buf.data);

    T(0,2) += dx;
    T(1,2) += dy;
    (*this)(image, T, warped, warped.size(), rng);

    if( T.data != matT.data )
        T.convertTo(matT, matT.type());
}
int main(int argc, char **argv)
{
    fstream fs("test.txt", ios::out);
    VO_CohenDaubechiesFeauveau voCDF;
    VO_Coiflets vocoiflets;
    VO_Daubechies vodaubechies;
    VO_Haar vohaar;
    VO_Symlets vosymlets;
    VO_Gabor vogabor;
    vogabor.VO_PrepareGaborKernel(  4,
                                    2.0f, 
                                    0.0f,
                                    0.0f,
                                    4,
                                    1.0f);
    vogabor.GetWindowFunc()->VO_DisplayWindowFuncKernel("gaborkernel.jpg");
    Mat iImg = imread ( "/usr/local/share/opencv/samples/c/lena.jpg", 0 );
    cout << iImg.channels() << endl;
    
    Mat_<float> inputImg;
    iImg.copyTo(inputImg);
    inputImg.convertTo(inputImg, CV_32FC1);
    Mat_<float> waveletImg;
    cv::dft(inputImg, waveletImg );
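    // Note: waveletImg holds 32-bit float DFT coefficients; to get a meaningful image
    // file one would normally take the magnitude, normalize, and convert to 8-bit first.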
    imwrite("dft.jpg", waveletImg);
    cv::idft(waveletImg, inputImg, DFT_SCALE);
    imwrite("idft.jpg", inputImg);
    Mat oImg = Mat::zeros(iImg.size(), iImg.type());
//    vogabor.VO_ForwardTransform(inputImg, waveletImg);
    vogabor.VO_ForwardTransform(inputImg, Point(256, 256), waveletImg);
    imwrite("gabored.jpg", waveletImg);
    vogabor.VO_BackwardTransform(waveletImg, inputImg);
    imwrite("igabored.jpg", inputImg);
    
    return 0;
}
int main(int argc, const char * argv[]) {

    //----------------------
    // Open an OpenNI device
    //----------------------

    //TODO: You'll want to open an RGB camera stream here too (the one with which you wish to register the depth)

    cout << "Device opening ..." << endl;

    VideoCapture capture;
    capture.open( CAP_OPENNI );

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    // We don't want registration on, since we're going to do it ourselves.
    // Some devices with RGB cameras can perform registration on device
    bool modeRes=false;
    modeRes = capture.set( CAP_PROP_OPENNI_REGISTRATION, 0 );

    if (!modeRes) {
        cout << "Can't disable registration. That's crazy!\n" << endl;
        return -1;
    }

    // Display the current configuration
    cout << "\nDepth generator output mode:" << endl <<
        "FRAME_WIDTH      " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl <<
        "FRAME_HEIGHT     " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl <<
        "FRAME_MAX_DEPTH  " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
        "FPS              " << capture.get( CAP_PROP_FPS ) << endl <<
        "REGISTRATION     " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl;


    //---------------------------------------
    // Specify camera properties and geometry
    //--------------------------------------

    //TODO: Fill in the values for your setup.

    // Depth camera intrinsics
    Matx33f unregisteredCameraMatrix = Matx33f::eye();
    unregisteredCameraMatrix(0,0) = 570.0f;
    unregisteredCameraMatrix(1,1) = 570.0f;
    unregisteredCameraMatrix(0,2) = 320.0f-0.5f;
    unregisteredCameraMatrix(1,2) = 240.0f-0.5f;

    // NOTE: The depth distortion coefficients are currently not used by the Registration class.
    Vec<float, 5> unregisteredDistCoeffs(0,0,0,0,0);


    // RGB camera intrinsics
    Matx33f registeredCameraMatrix = Matx33f::eye();
    registeredCameraMatrix(0,0) = 570.0f;
    registeredCameraMatrix(1,1) = 570.0f;
    registeredCameraMatrix(0,2) = 320.0f-0.5f;
    registeredCameraMatrix(1,2) = 240.0f-0.5f;

    Vec<float, 5> registeredDistCoeffs(0,0,0,0,0);

    Size2i registeredImagePlaneSize = Size2i(640, 480);

    // The rigid body transformation between cameras.
    // Used as: uv_rgb = K_rgb * [R | t] * z * inv(K_ir) * uv_ir
    Matx44f registrationRbt = Matx44f::eye();
    registrationRbt(0,3) = .04;
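    // i.e. the registered (RGB) camera is assumed to sit 0.04 units (presumably metres)
    // along x from the unregistered (depth) camera.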


    //------------------------------
    // Create our registration class
    //------------------------------
    oc::Registration registration(unregisteredCameraMatrix,
                                  unregisteredDistCoeffs,
                                  registeredCameraMatrix,
                                  registeredDistCoeffs,
                                  registrationRbt);

    for (;;) {

        Mat_<uint16_t> depthMap;

        if( !capture.grab() )
        {
            cout << "Can't grab depth." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) )
            {

                // Actually perform the registration
                Mat_<uint16_t> registeredDepth;
                bool performDilation = false;
                registration.registerDepthToColor(depthMap,
                                                  registeredImagePlaneSize,
                                                  registeredDepth,
                                                  performDilation);


                //Display the unregistered and registered depth
                const float scaleFactor = 0.05f;
                {
                    Mat_<uint8_t> show;
                    depthMap.convertTo( show, CV_8UC1, scaleFactor );
                    imshow( "depth map", show );
                }
                {
                    Mat_<uint8_t> show;
                    registeredDepth.convertTo( show, CV_8UC1, scaleFactor );
                    imshow( "registered map", show );
                }

            }

        }

        if( waitKey( 1 ) >= 0 )
            break;
    }



    return 0;
}
Example #7
void visualize_3d_points(const Mat_<cv::Vec3f>& points_3d, std::string name) {
  Mat points_3d_display;
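  // Crude visualization: scale the coordinates by 100 and saturate to 8-bit.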
  points_3d.convertTo(points_3d_display, CV_8UC3, 100);
  imshow(name, points_3d_display);
}
Example #8
    //
    // thanks again, Haris. i wouldn't be anywhere without your mind here.
    //
    Mat project3d(const Mat & test) const
    {
        PROFILEX("project3d");

        int mid = mdl.cols/2;
        int midi = test.cols/2;
        Rect R(mid-crop/2,mid-crop/2,crop,crop);
        Rect Ri(midi-crop/2,midi-crop/2,crop,crop);

        // get landmarks
        vector<Point2d> pts2d;
        getkp2d(test, pts2d, Ri);
        //cerr << "nose :" << pts2d[30].x << endl;

        // get pose mat for our landmarks
        Mat KP = pnp(test.size(), pts2d);

        // project img to head, count occlusions
        Mat_<uchar> test2(mdl.size(),127);
        Mat_<uchar> counts(mdl.size(),0);
	    for (int i=R.y; i<R.y+R.height; i++)
        {
            PROFILEX("proj_1");
	        for (int j=R.x; j<R.x+R.width; j++)
            {
                Mat1d p = project_vec(KP, i, j);
		        int x = int(p(0) / p(2));
		        int y = int(p(1) / p(2));
                if (y < 0 || y > test.rows - 1) continue;
                if (x < 0 || x > test.cols - 1) continue;
                // stare hard at the coord transformation ;)
                test2(i, j) = test.at<uchar>(y, x);
                // each point used more than once is occluded
                counts(y, x) ++;
	        }
        }

        // project the occlusion counts in the same way
        Mat_<uchar> counts1(mdl.size(),0);
	    for (int i=R.y; i<R.y+R.height; i++)
        {
            PROFILEX("proj_2");
	        for (int j=R.x; j<R.x+R.width; j++)
            {
                Mat1d p = project_vec(KP, i, j);
		        int x = int(p(0) / p(2));
		        int y = int(p(1) / p(2));
                if (y < 0 || y > test.rows - 1) continue;
                if (x < 0 || x > test.cols - 1) continue;
                counts1(i, j) = counts(y, x);
	        }
        }
        blur(counts1, counts1, Size(9,9));
        counts1 -= eyemask;
        counts1 -= eyemask;

        // count occlusions in left & right half
        Rect left (0,  0,mid,counts1.rows);
        Rect right(mid,0,mid,counts1.rows);
        double sleft=sum(counts1(left))[0];
        double sright=sum(counts1(right))[0];

        // fix occlusions with soft symmetry
        Mat_<double> weights;
        Mat_<uchar> sym = test2.clone();
        if (abs(sleft-sright)>symThresh)
        {
            PROFILEX("proj_3");

            // make weights
            counts1.convertTo(weights,CV_64F);

            Point p,P;
            double m,M;
            minMaxLoc(weights,&m,&M,&p,&P);
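            // Map the occlusion counts to blending weights in (0,1): the more occluded
            // a pixel, the more it is replaced by its horizontally mirrored counterpart.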

            double *wp = weights.ptr<double>();
            for (size_t i=0; i<weights.total(); ++i)
                wp[i] = (1.0 - 1.0 / exp(symBlend+(wp[i]/M)));
            // cerr << weights(Rect(mid,mid,6,6)) << endl;

            for (int i=R.y; i<R.y+R.height; i++)
            {
                if (sleft-sright>symThresh) // left side needs fixing
                {
                    for (int j=R.x; j<mid; j++)
                    {
                        int k = mdl.cols-j-1;
                        sym(i,j) = test2(i,j) * (1-weights(i,j)) + test2(i,k) * (weights(i,j));
                    }
                }
                if (sright-sleft>symThresh) // right side needs fixing
                {
                    for (int j=mid; j<R.x+R.width; j++)
                    {
                        int k = mdl.cols-j-1;
                        sym(i,j) = test2(i,j) * (1-weights(i,j)) + test2(i,k) * (weights(i,j));
                    }
                }
            }
        }

        if (DEBUG_IMAGES)
        {
            cerr << (sleft-sright) << "\t" << (abs(sleft-sright)>symThresh) << endl;
            imshow("proj",test2);
            if (abs(sleft-sright)>symThresh)
                imshow("weights", weights);
            Mat t = test.clone();
            rectangle(t,Ri,Scalar(255));
            for (size_t i=0; i<pts2d.size(); i++)
                circle(t, pts2d[i], 1, Scalar(0));
            imshow("test3",t);
        }

        Mat gray;
        sym.convertTo(gray,CV_8U);

        return sym(R);
    }
int main (int argc, char **argv)
{

	vector<string> arguments = get_arguments(argc, argv);

	// Some initial parameters that can be overridden from the command line
	vector<string> files, dDirs, outposes, outvideos, outfeatures;
	
	// By default try webcam
	int device = 0;

	// cx and cy aren't always half of dimx/dimy, so they need to be overridable (start at zero and initialise them later if not specified)
    float fx = 500, fy = 500, cx = 0, cy = 0;
	int dimx = 0, dimy = 0;

	bool useCLMTracker = true;
	
	CLMWrapper::CLMParameters clmParams(arguments);
	
	clmParams.wSizeCurrent = clmParams.wSizeInit;

    PoseDetectorHaar::PoseDetectorHaarParameters haarParams;

	#if OS_UNIX
    haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
	#else
		haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
	#endif
		
	// Get the input output file parameters
	CLMWrapper::get_video_input_output_params(files, dDirs, outposes, outvideos, outfeatures, arguments);
	// Get camera parameters
	CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);    
	
	// The modules that are being used for tracking
	CLMTracker::TrackerCLM clmModel;	
	
	// Face detector initialisation
	CascadeClassifier classifier(haarParams.ClassifierLocation);
	if(classifier.empty())
	{
		string err = "Could not open a face detector at: " + haarParams.ClassifierLocation;
		FATAL_STREAM( err );
	}

	bool done = false;
	
	int f_n = -1;

	while(!done)
	{
		string file;

		// We might specify multiple video files as arguments
		if(files.size() > 0)
		{
			f_n++;			
		    file = files[f_n];
		}

		bool readDepth = !dDirs.empty();	

		// Do some grabbing
		VideoCapture vCap;
		if( file.size() > 0 )
		{
			INFO_STREAM( "Attempting to read from file: " << file );
			vCap = VideoCapture( file );
		}
		else
		{
			INFO_STREAM( "Attempting to capture from device: " << device );
			vCap = VideoCapture( device );

			// Read a first frame, which is often empty when capturing from a camera
			Mat img;
			vCap >> img;
		}

		if( !vCap.isOpened() ) FATAL_STREAM( "Failed to open video source" );
		else INFO_STREAM( "Device or file opened");

		Mat img;
		vCap >> img;

		// If no dimensions defined, do not do any resizing
		if(dimx == 0 || dimy == 0)
		{
			dimx = img.cols;
			dimy = img.rows;
		}
	
		// If optical centers are not defined just use center of image
		if(cx == 0 || cy == 0)
		{
			cx = dimx / 2.0f;
			cy = dimy / 2.0f;
		}
	
		// Creating output files
		std::ofstream posesFile;
		if(!outposes.empty())
		{
			posesFile.open (outposes[f_n]);
		}
	
		std::ofstream featuresFile;		
		if(!outfeatures.empty())
		{
			featuresFile.open(outfeatures[f_n]);
		}
	
		int frameProc = 0;

		// number of consecutive frames in which a face was detected
		int facesInRow = 0;

		// saving the videos
		VideoWriter writerFace;
		if(!outvideos.empty())
		{
			writerFace = VideoWriter(outvideos[f_n], CV_FOURCC('D','I','V','X'), 30, img.size(), true);		
		}

		// Variables useful for the tracking itself
		bool success = false;
		bool trackingInitialised = false;
	
		// For measuring the timings
		int64 t1,t0 = cv::getTickCount();
		double fps = 10;

		Mat disp;

		INFO_STREAM( "Starting tracking");
		while(!img.empty())
		{		

			Mat_<float> depth;
			Mat_<uchar> gray;
			cvtColor(img, gray, CV_BGR2GRAY);
		
			// Don't resize if it's unneeded
			Mat_<uchar> img_scaled;		
			if(dimx != gray.cols || dimy != gray.rows)
			{
				resize( gray, img_scaled, Size( dimx, dimy ) );
				resize(img, disp, Size( dimx, dimy));
			}
			else
			{
				img_scaled = gray;
				disp = img.clone();
			}
		
			namedWindow("colour",1);

			// Get depth image
			if(readDepth)
			{
				char dst[100];
				std::stringstream sstream;
				//sstream << dDir << "\\depth%06d.png";
				sstream << dDirs[f_n] << "\\depth%05d.png";
				sprintf(dst, sstream.str().c_str(), frameProc + 1);
				Mat_<short> dImg = imread(string(dst), -1);
				if(!dImg.empty())
				{
					if(dimx != dImg.cols || dimy != dImg.rows)
					{
						Mat_<short> dImgT;
						resize(dImg, dImgT, Size( dimx, dimy));
						dImgT.convertTo(depth, CV_32F);
					}
					else
					{
						dImg.convertTo(depth, CV_32F);
					}
				}
				else
				{
					WARN_STREAM( "Can't find depth image" );
				}
			}

			Vec6d poseEstimateHaar;
			Matx66d poseEstimateHaarUncertainty;

			Rect faceRegion;

			// Where CLM should start its search (or, if tracking failed, fall back to per-frame detection)
			if(!trackingInitialised || (!success && ( frameProc  % 2 == 0)))
			{
				INFO_STREAM( "Attempting to initialise a face");
				// The tracker can return multiple head pose observations
				vector<Vec6d> poseEstimatesInitialiser;
				vector<Matx66d> covariancesInitialiser;			
				vector<Rect> regionsInitialiser;

				bool initSuccess = PoseDetectorHaar::InitialisePosesHaar(img_scaled, depth, poseEstimatesInitialiser, covariancesInitialiser, regionsInitialiser, classifier, fx, fy, cx, cy, haarParams);
					
				if(initSuccess)
				{
					INFO_STREAM( "Face(s) detected");
					if(poseEstimatesInitialiser.size() > 1)
					{
						cout << "ambiguous detection: " << endl;
						// keep the closest one (this is a hack for the experiment)
						double best = 10000;
						int bestIndex = -1;
						for( size_t i = 0; i < poseEstimatesInitialiser.size(); ++i)
						{
							cout << regionsInitialiser[i].x << " " << regionsInitialiser[i].y <<  " " << regionsInitialiser[i].width << " " <<  regionsInitialiser[i].height << endl;
							if(poseEstimatesInitialiser[i][2] < best  && poseEstimatesInitialiser[i][2] > 100)
							{
								bestIndex = i;
								best = poseEstimatesInitialiser[i][2];
							}									
						}
						if(bestIndex != -1)
						{
							cout << "Choosing bbox:" << regionsInitialiser[bestIndex].x << " " << regionsInitialiser[bestIndex].y <<  " " << regionsInitialiser[bestIndex].width << " " <<  regionsInitialiser[bestIndex].height << endl;
							faceRegion = regionsInitialiser[bestIndex];
						}
						else
						{
							initSuccess = false;
						}
					}
					else
					{	
						faceRegion = regionsInitialiser[0];
					}				
	
					facesInRow++;
				}
			}

			// If the condition for tracking is met, initialise the tracker
			if(!trackingInitialised && facesInRow >= 1)
			{			
				INFO_STREAM( "Initialising CLM");
				trackingInitialised = CLMWrapper::InitialiseCLM(img_scaled, depth, clmModel, poseEstimateHaar, faceRegion, fx, fy, cx, cy, clmParams);		
				facesInRow = 0;
			}		

			// the OpenCV detector is needed here; if tracking failed, reinitialise using it
			if(trackingInitialised)
			{
				success = CLMWrapper::TrackCLM(img_scaled, depth, clmModel, vector<Vec6d>(), vector<Matx66d>(), faceRegion, fx, fy, cx, cy, clmParams);								
			}			
			if(success)
			{			
				clmParams.wSizeCurrent = clmParams.wSizeSmall;
			}
			else
			{
				clmParams.wSizeCurrent = clmParams.wSizeInit;
			}

			// Changes for no reinit version
			//success = true;
			//clmParams.wSizeCurrent = clmParams.wSizeInit;

			Vec6d poseEstimateCLM = CLMWrapper::GetPoseCLM(clmModel, fx, fy, cx, cy, clmParams);

			if(!outfeatures.empty())
			{
				featuresFile << frameProc + 1 << " " << success;
				for (int i = 0; i < 66 * 2; ++ i)
				{
					featuresFile << " " << clmModel._shape.at<double>(i) << endl;
				}
				featuresFile << endl;
			}

			if(!outposes.empty())
			{
				posesFile << frameProc + 1 << " " << (float)frameProc * 1000/30 << " " << 1 << " " << poseEstimateCLM[0] << " " << poseEstimateCLM[1] << " " << poseEstimateCLM[2] << " " << poseEstimateCLM[3] << " " << poseEstimateCLM[4] << " " << poseEstimateCLM[5] << endl;
			}										
	
			if(success)			
			{
				int idx = clmModel._clm.GetViewIdx(); 	

				// drawing the facial features on the face if tracking is successful
				clmModel._clm._pdm.Draw(disp, clmModel._shape, clmModel._clm._triangulations[idx], clmModel._clm._visi[0][idx]);

				DrawBox(disp, poseEstimateCLM, Scalar(255,0,0), 3, fx, fy, cx, cy);			
			}
			else if(!clmModel._clm._pglobl.empty())
			{			
				int idx = clmModel._clm.GetViewIdx(); 	
			
				// draw the facial features
				clmModel._clm._pdm.Draw(disp, clmModel._shape, clmModel._clm._triangulations[idx], clmModel._clm._visi[0][idx]);

				// if tracking fails draw a red outline
				DrawBox(disp, poseEstimateCLM, Scalar(0,0,255), 3, fx, fy, cx, cy);	
			}
			if(frameProc % 10 == 0)
			{      
				t1 = cv::getTickCount();
				fps = 10.0 / (double(t1-t0)/cv::getTickFrequency()); 
				t0 = t1;
			}

			char fpsC[255];
			sprintf(fpsC, "%d", (int)fps);
			string fpsSt("FPS:");
			fpsSt += fpsC;
			cv::putText(disp, fpsSt, cv::Point(10,20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0));
		
			frameProc++;
						
			imshow("colour", disp);
			if(!depth.empty())
			{
				imshow("depth", depth/2000.0);
			}

			vCap >> img;
		
			if(!outvideos.empty())
			{		
				writerFace << disp;
			}
		
			// detect key presses
			char c = cv::waitKey(2);

			// key detections

			// restart the tracker
			if(c == 'r')
			{
				trackingInitialised = false;
				facesInRow = 0;
			}
			// quit the application
			else if(c=='q')
			{
				return(0);
			}


		}
		
		trackingInitialised = false;
		facesInRow = 0;

		posesFile.close();

		// break out of the loop if done with all the files
		if(f_n == files.size() -1)
		{
			done = true;
		}
	}

	return 0;
}
void MatchFeatures(const Mat& img_1, const Mat& img_1_orig, 
				   const Mat& img_2, const Mat& img_2_orig,
				   const vector<KeyPoint>& imgpts1,
				   const vector<KeyPoint>& imgpts2,
				   const Mat& descriptors_1, 
				   const Mat& descriptors_2,
				   vector<KeyPoint>& fullpts1,
				   vector<KeyPoint>& fullpts2,
				   int strategy,
				   vector<DMatch>* matches) {
	//strategy
	bool use_features_for_matching =		(strategy & STRATEGY_USE_FEATURE_MATCH) > 0;
	bool use_optical_flow_for_matching =	(strategy & STRATEGY_USE_OPTICAL_FLOW) > 0;
	bool use_dense_optflow =				(strategy & STRATEGY_USE_DENSE_OF) > 0;
	bool use_horiz_disparity =				(strategy & STRATEGY_USE_HORIZ_DISPARITY) > 0;
	
	
	std::vector< DMatch > good_matches_,very_good_matches_;
	std::vector<KeyPoint> keypoints_1, keypoints_2;
	
	Mat_<Point2f> flow_from_features(img_1.size());
#ifdef __SFM__DEBUG__
	Mat outputflow; img_1_orig.copyTo(outputflow);
#endif		
	
	bool update_imgpts1 = (imgpts1.size()<=0);
	bool update_imgpts2 = (imgpts2.size()<=0);
	
	cout << "----------------------------------------------------------------------"<<endl;
	if (update_imgpts1) {
		cout << "imgpts1 empty, get new" << endl;
	} else {
		cout << "imgpts1 has " << imgpts1.size() << " points (descriptors " << descriptors_1.rows << ")" << endl;
	}
	if (update_imgpts2) {
		cout << "imgpts2 empty, get new" << endl;
	} else {
		cout << "imgpts2 has " << imgpts2.size() << " points (descriptors " << descriptors_2.rows << ")" << endl;
	}
	
	if(use_features_for_matching) 
	{	
		//-- Step 1: Detect the keypoints using SURF Detector
		int minHessian = 10;
		
		//		GridAdaptedFeatureDetector detector(new SurfFeatureDetector(minHessian), 1000,1,1);
		SurfFeatureDetector detector( minHessian );
		
		if(update_imgpts1) {
			detector.detect( img_1, keypoints_1 );
		} else {
			keypoints_1 = imgpts1;
		}

		if(update_imgpts2) {
			detector.detect( img_2, keypoints_2 );
		} else {
			keypoints_2 = imgpts2;
		}

		
		//-- Step 2: Calculate descriptors (feature vectors)
		//		SurfDescriptorExtractor extractor(8,4,true);
		SiftDescriptorExtractor extractor(48,16,true);
		//	OpponentColorDescriptorExtractor extractor(new SurfDescriptorExtractor);
		
		if(descriptors_1.empty()) {
//			Mat desc;
//			extractor.compute( img_1, keypoints_1, desc );
//			desc.copyTo(descriptors_1);
			CV_Error(0,"descriptors_1 is empty");
		}
		if(descriptors_2.empty()) {
//			Mat desc;
//			extractor.compute( img_2, keypoints_2, desc );
//			desc.copyTo(descriptors_2);
			CV_Error(0,"descriptors_2 is empty");
		}
		
		//-- Step 3: Matching descriptor vectors using FLANN matcher
		//FlannBasedMatcher matcher;
		BFMatcher matcher(NORM_L2,true); //use an alternative to the ratio test
		std::vector< DMatch > matches_;
		if (matches == NULL) {
			matches = &matches_;
		}
		if (matches->size() == 0) {
#ifdef __SFM__DEBUG__
			cout << "matching desc1="<<descriptors_1.rows<<", desc2="<<descriptors_2.rows<<endl;
#endif
			matcher.match( descriptors_1, descriptors_2, *matches );
		}
		
#ifdef __SFM__DEBUG__
		cout << "matches->size() " << matches->size() << endl;
#endif
		
		double max_dist = 0; double min_dist = 1000.0;
		//-- Quick calculation of max and min distances between keypoints
		for(unsigned int i = 0; i < matches->size(); i++ )
		{ 
			double dist = (*matches)[i].distance;
			if( dist < min_dist ) min_dist = dist;
			if( dist > max_dist ) max_dist = dist;
		}
		
#ifdef __SFM__DEBUG__
		printf("-- Max dist : %f \n", max_dist );
		printf("-- Min dist : %f \n", min_dist );
#endif
		
		vector<KeyPoint> imgpts1_good,imgpts2_good;

		if (min_dist <= 0) {
			min_dist = 10.0;
		}
		
		double cutoff = 4.0*min_dist;
		std::set<int> existing_trainIdx;
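		// Keep only matches within 4x the minimum descriptor distance, at most one per train index.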
		for(unsigned int i = 0; i < matches->size(); i++ )
		{ 
			if ((*matches)[i].trainIdx <= 0) {
				(*matches)[i].trainIdx = (*matches)[i].imgIdx;
			}
			
			if( existing_trainIdx.find((*matches)[i].trainIdx) == existing_trainIdx.end() && 
				(*matches)[i].trainIdx >= 0 && (*matches)[i].trainIdx < (int)(keypoints_2.size()) &&
				(*matches)[i].distance > 0.0 && (*matches)[i].distance < cutoff ) 
			{
				good_matches_.push_back( (*matches)[i]);
				imgpts1_good.push_back(keypoints_1[(*matches)[i].queryIdx]);
				imgpts2_good.push_back(keypoints_2[(*matches)[i].trainIdx]);
				existing_trainIdx.insert((*matches)[i].trainIdx);
			}
		}
		
		
#ifdef __SFM__DEBUG__
		cout << "keypoints_1.size() " << keypoints_1.size() << " imgpts1_good.size() " << imgpts1_good.size() << endl;
		cout << "keypoints_2.size() " << keypoints_2.size() << " imgpts2_good.size() " << imgpts2_good.size() << endl;

		{
			//-- Draw only "good" matches
			Mat img_matches;
			drawMatches( img_1, keypoints_1, img_2, keypoints_2,
						good_matches_, img_matches, Scalar::all(-1), Scalar::all(-1),
						vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );		
			//-- Show detected matches
			imshow( "Feature Matches", img_matches );
			waitKey(100);
			destroyWindow("Feature Matches");
		}
#endif
		
		//Let the feature matching guide the general flow...
		
		vector<uchar> status;
		vector<KeyPoint> imgpts2_very_good,imgpts1_very_good;
		
		
		//Select features that make epipolar sense
		{
			vector<Point2f> pts1,pts2;
			KeyPointsToPoints(imgpts1_good, pts1);
			KeyPointsToPoints(imgpts2_good, pts2);
#ifdef __SFM__DEBUG__
			cout << "pts1 " << pts1.size() << " (orig pts " << imgpts1_good.size() << ")" << endl;
			cout << "pts2 " << pts2.size() << " (orig pts " << imgpts2_good.size() << ")" << endl;
#endif
			Mat F = findFundamentalMat(pts1, pts2, FM_RANSAC, 0.1, 0.99, status);
		}
		cout << "Fundamental mat is keeping " << countNonZero(status) << " / " << status.size() << endl;	
		
		double status_nz = countNonZero(status); 
		double status_sz = status.size();
		double kept_ratio = status_nz / status_sz;
		if (kept_ratio > 0.2) {
			
			for (unsigned int i=0; i<imgpts1_good.size(); i++) {
				if (status[i]) 
				{
					imgpts1_very_good.push_back(imgpts1_good[i]);
					imgpts2_very_good.push_back(imgpts2_good[i]);
				}
			}
			
			if(use_optical_flow_for_matching) {
				//Estimate the overall 2D homography
		//		Mat_<double> H;
		//		{
		//			vector<Point2f> pts1,pts2;
		//			KeyPointsToPoints(imgpts1_very_good, pts1);
		//			KeyPointsToPoints(imgpts2_very_good, pts2);
		//			cout << "pts1 " << pts1.size() << endl;
		//			cout << "pts2 " << pts2.size() << endl;
		//			H = findHomography(pts1, pts2, CV_RANSAC, 0.001);
		//			cout << "homography from features " << endl << H << endl;
		//		}
				Mat_<double> T;
				{
					vector<Point2f> pts1,pts2;
					KeyPointsToPoints(imgpts1_very_good, pts1);
					KeyPointsToPoints(imgpts2_very_good, pts2);
#ifdef __SFM__DEBUG__
					cout << "pts1 " << pts1.size() << endl;
					cout << "pts2 " << pts2.size() << endl;
#endif
					T = estimateRigidTransform(pts1,pts2, false);
#ifdef __SFM__DEBUG__
					cout << "rigid transform from features " << endl << T << endl;
#endif
				}
				
				//Create the approximate flow using the estimated overall motion
				for (int x=0; x<img_1.cols; x++) {
					for (int y=0; y<img_1.rows; y++) {
		//				Mat_<double> moved = H * (Mat_<double>(3,1) << x , y , 1);
						Mat_<double> moved = T * (Mat_<double>(3,1) << x , y , 1);
						Point2f movedpt(moved(0),moved(1));
						flow_from_features(y,x) = Point2f(movedpt.x-x,movedpt.y-y);
#ifdef __SFM__DEBUG__
		//				circle(outputflow, Point(x,y), 1, Scalar(0,255*norm(flow_from_features(y,x))/250), 1);
						if (x%20 == 0 && y%20 == 0) {
		//					cout << "Point " << Point(x,y) << " moved to " << movedpt << endl;
							line(outputflow, Point(x,y), movedpt, Scalar(0,255*norm(flow_from_features(y,x))/50), 1);
						}
#endif
					}
				}
#ifdef __SFM__DEBUG__
				imshow("flow", outputflow);
				waitKey(100);
				destroyWindow("flow");
#endif		
			}
		} 
	} 
	if(use_optical_flow_for_matching) 
	{
#ifdef __SFM__DEBUG__
		img_1_orig.copyTo(outputflow);
#endif		
		double t = getTickCount();
		cout << "Optical Flow...";
		if(use_dense_optflow) {
			cout << "Dense...";
			Mat_<Point2f> _flow,flow;
			if (use_features_for_matching) {
				flow_from_features.copyTo(flow);
			} else {
				//coarse
				calcOpticalFlowFarneback(img_1,img_2,flow,0.5,5,150,60,7,1.5,OPTFLOW_FARNEBACK_GAUSSIAN);
			}

			//refine
			calcOpticalFlowFarneback(img_1,img_2,flow,0.5,2,40,40,5,0.5,OPTFLOW_USE_INITIAL_FLOW);
			calcOpticalFlowFarneback(img_1,img_2,flow,0.5,0,25,40,3,0.25,OPTFLOW_USE_INITIAL_FLOW);
			
			//imgpts1.clear(); imgpts2.clear(); 
			good_matches_.clear(); keypoints_1.clear(); keypoints_2.clear();
			
			for (int x=0;x<flow.cols; x+=1) {
				for (int y=0; y<flow.rows; y+=1) {
					if (norm(flow(y,x)) < 20 || norm(flow(y,x)) > 100) {
						continue; //discard points that haven't moved (or moved implausibly far)
					}
					Point2f p(x,y),p1(x+flow(y,x).x,y+flow(y,x).y);
					
					//line(outputflow, p, p1, Scalar(0,255*norm(flow(y,x))/50), 1);
#ifdef __SFM__DEBUG__
					circle(outputflow, p, 1, Scalar(0,255*norm(flow(y,x))/50), 1);
#endif
					if (x%10 == 0 && y%10 == 0) {
//						imgpts1.push_back(KeyPoint(p,1));
//						imgpts2.push_back(KeyPoint(p1,1));
						good_matches_.push_back(DMatch(keypoints_1.size(),keypoints_1.size(),1.0)); // match the keypoint pushed just below
						keypoints_1.push_back(KeyPoint(p,1));
						keypoints_2.push_back(KeyPoint(p1,1));
					}
					fullpts1.push_back(KeyPoint(p,1));
					fullpts2.push_back(KeyPoint(p1,1));
				}
			}		
		} else {
			vector<Point2f> corners,nextPts; vector<uchar> status; vector<float> err;
			goodFeaturesToTrack(img_1, corners, 2000, 0.001, 10);
			cornerSubPix(img_1, corners, Size(15,15), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 ));
			calcOpticalFlowPyrLK(img_1, img_2, corners, nextPts, status, err, Size(45,45));
			for (unsigned int i=0; i<corners.size(); i++) {
				if(status[i] == 1) {
#ifdef __SFM__DEBUG__
					line(outputflow, corners[i], nextPts[i], Scalar(0,255), 1);
#endif
//					imgpts1.push_back(KeyPoint(corners[i],1));
//					imgpts2.push_back(KeyPoint(nextPts[i],1));
					good_matches_.push_back(DMatch(keypoints_1.size(),keypoints_1.size(),1.0)); // match the keypoint pushed just below
					keypoints_1.push_back(KeyPoint(corners[i],1));
					keypoints_2.push_back(KeyPoint(nextPts[i],1));
				}
			}
		}
		t = ((double)getTickCount() - t)/getTickFrequency();
		cout << "Done. (" << t <<"s)"<< endl;
#ifdef __SFM__DEBUG__
		imshow("flow", outputflow);
		waitKey(100);
		destroyWindow("flow");
#endif
	} 
	else if(use_horiz_disparity) 
	{		
		double downscale = 0.6;
		Mat small_im1; resize(img_1_orig,small_im1,Size(),downscale,downscale);
		Mat small_im2; resize(img_2_orig,small_im2,Size(),downscale,downscale);
		int numberOfDisparities = ((small_im1.cols/8) + 15) & -16;
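		// round the disparity count up to the nearest multiple of 16, as StereoSGBM requires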
		
		StereoSGBM sgbm;
		sgbm.preFilterCap = 63;
		sgbm.SADWindowSize = 3;
		
		int cn = img_1_orig.channels();
		
		sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
		sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
		sgbm.minDisparity = 0;
		sgbm.numberOfDisparities = numberOfDisparities;
		sgbm.uniquenessRatio = 10;
		sgbm.speckleWindowSize = 100;
		sgbm.speckleRange = 32;
		sgbm.disp12MaxDiff = 1;
		sgbm.fullDP = false;
		
		Mat_<short> disp;
		sgbm(small_im1, small_im2, disp);
		Mat disp8; disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
#ifdef __SFM__DEBUG__
		imshow("disparity",disp8);
		waitKey(0);
		destroyWindow("disparity");
#endif		
		Mat outputflow; img_1_orig.copyTo(outputflow);
		Mat_<short> disp_orig_scale; resize(disp,disp_orig_scale,img_1.size());
		
		for (int x=0;x<disp_orig_scale.cols; x+=1) {
			for (int y=0; y<disp_orig_scale.rows; y+=1) {
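				// SGBM disparities are fixed-point (scaled by 16); also undo the image downscale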
				float _d = ((float)disp_orig_scale(y,x))/(16.0 * downscale);
				if (fabsf(_d) > 150.0f || fabsf(_d) < 5.0f) {
					continue; //discard strange points 
				}
				Point2f p(x,y),p1(x-_d,y);
#ifdef __SFM__DEBUG__
				circle(outputflow, p, 1, Scalar(0,255*_d/50.0), 1);
#endif
				if (x%10 == 0 && y%10 == 0) {
//					imgpts1.push_back(KeyPoint(p,1));
//					imgpts2.push_back(KeyPoint(p1,1));
					good_matches_.push_back(DMatch(keypoints_1.size(),keypoints_1.size(),1.0)); // match the keypoint pushed just below
					keypoints_1.push_back(KeyPoint(p,1));
					keypoints_2.push_back(KeyPoint(p1,1));
				}
				fullpts1.push_back(KeyPoint(p,1));
				fullpts2.push_back(KeyPoint(p1,1));
			}
		}		
#ifdef __SFM__DEBUG__		
		imshow("outputflow", outputflow);
		waitKey(0);
		destroyWindow("outputflow");
#endif
	}
	
	//Draw matches
//	if(0) 
#ifdef __SFM__DEBUG__
	{
		//-- Draw only "good" matches
		Mat img_matches;
		drawMatches( img_1, keypoints_1, img_2, keypoints_2,
					good_matches_, img_matches, Scalar::all(-1), Scalar::all(-1),
					vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );		
		//-- Show detected matches
		imshow( "Good Matches", img_matches );
		waitKey(100);
		destroyWindow("Good Matches");
	}
#endif
}